input | output
---|---
from __future__ import annotations
from .CSRLoss import CSRLoss, CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
from __future__ import annotations
from .CSRLoss import CSRLoss
from .CSRReconstructionLoss import CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from dask_cuda import LocalCUDACluster
from distributed import Client
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
def test_external_memory(is_qdm: bool) -> None:
n_workers = 2
with LocalCUDACluster(n_workers=2) as cluster:
with Client(cluster) as client:
args = client.sync(
dxgb._get_rabit_args,
2,
None,
client,
)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cuda",
comm_args=args,
is_qdm=is_qdm,
)
client.gather(futs)
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from dask_cuda import LocalCUDACluster
from distributed import Client
import xgboost as xgb
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
def test_external_memory(is_qdm: bool) -> None:
n_workers = 2
with LocalCUDACluster(n_workers=2) as cluster:
with Client(cluster) as client:
args = client.sync(
xgb.dask._get_rabit_args,
2,
None,
client,
)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cuda",
comm_args=args,
is_qdm=is_qdm,
)
client.gather(futs)
|
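# Aside: the two import spellings used in these tests are interchangeable;
# `from xgboost import dask as dxgb` binds the same module object as `xgb.dask`:
import xgboost as xgb
from xgboost import dask as dxgb

assert dxgb is xgb.dask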
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
_: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_kwargs: Any
) -> Union[BaseModel, dict]:
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
params = cast("dict", schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
    assert prompt.messages[0].prompt.template_format == "mustache"  # type: ignore[union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
_: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_kwargs: Any
) -> Union[BaseModel, dict]:
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
params = cast("dict", schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
FakeStructuredChatModel.model_rebuild()
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
    assert prompt.messages[0].prompt.template_format == "mustache"  # type: ignore[union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
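# Aside: `model_rebuild()` is pydantic v2's hook for re-resolving a model's schema.
# A minimal sketch of when it is needed, using a forward reference (class names here
# are illustrative, not from the tests above):
from typing import Optional
from pydantic import BaseModel

class A(BaseModel):
    b: Optional["B"] = None  # "B" is not defined yet

class B(BaseModel):
    x: int = 0

A.model_rebuild()  # resolve the forward reference now that B exists
print(A(b=B(x=1)))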
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='DDOD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='DDODHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_iou=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
        # `assigner` here means the classification assigner (cls_assigner)
assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='DDOD',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='DDODHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_iou=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
        # `assigner` here means the classification assigner (cls_assigner)
assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# This `persistent_workers` is only valid when PyTorch>=1.7.0
data = dict(persistent_workers=True)
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
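# Aside: the `_base_` lists above rely on MMDetection-style config inheritance, where
# child keys recursively override base keys. A simplified sketch of the merge rule
# (an approximation, not the actual mmengine implementation):
def merge(base: dict, child: dict) -> dict:
    out = dict(base)
    for key, value in child.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)  # recurse into nested dicts
        else:
            out[key] = value  # the child value wins
    return out

base = dict(optimizer=dict(type='SGD', lr=0.02, momentum=0.9))
child = dict(optimizer=dict(lr=0.01))
assert merge(base, child) == dict(optimizer=dict(type='SGD', lr=0.01, momentum=0.9))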
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
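# Aside: a minimal usage sketch for two of the exports above (the checkpoint name is
# an assumption; any SentenceTransformer model works):
from sentence_transformers import SentenceTransformer, quantize_embeddings

model = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = model.encode(["hello world", "hi there"])
int8_embeddings = quantize_embeddings(embeddings, precision="int8")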
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import DocumentArray, Executor, requests
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
class FaissLMDBSearcher(Executor):
"""
    Expects a `Document` with `.embedding` of the same shape as the `Documents` stored
    in the `FaissSearcher`. The ids of the `Documents` stored in the `FaissSearcher`
    need to exist in the `LMDBStorage`; otherwise you will not get back the original
    metadata. The `FaissSearcher` attaches matches to the `Documents` sent as inputs,
    with the id of each match and its embedding. Then, the `LMDBStorage` retrieves the
    full metadata (original text or image blob) and attaches it to each `Document`.
    You receive back the full `Document`.
"""
def __init__(self, dump_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._vec_indexer = FaissSearcher(dump_path=dump_path, *args, **kwargs)
self._kv_indexer = LMDBStorage(dump_path=dump_path, *args, **kwargs)
@requests(on="/search")
def search(self, docs: "DocumentArray", parameters: Dict = None, **kwargs):
        parameters = parameters or {}  # guard against the default None
        self._vec_indexer.search(docs, parameters)
        kv_parameters = copy.deepcopy(parameters)
kv_parameters["traversal_paths"] = [
path + "m" for path in kv_parameters.get("traversal_paths", ["r"])
]
self._kv_indexer.search(docs, parameters=kv_parameters)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import DocumentArray, Executor, requests
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
class FaissLMDBSearcher(Executor):
def __init__(self, dump_path=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._vec_indexer = FaissSearcher(dump_path=dump_path, *args, **kwargs)
self._kv_indexer = LMDBStorage(dump_path=dump_path, *args, **kwargs)
@requests(on="/search")
def search(self, docs: "DocumentArray", parameters: Dict = None, **kwargs):
        parameters = parameters or {}  # guard against the default None
        self._vec_indexer.search(docs, parameters)
        kv_parameters = copy.deepcopy(parameters)
kv_parameters["traversal_paths"] = [
path + "m" for path in kv_parameters.get("traversal_paths", ["r"])
]
self._kv_indexer.search(docs, parameters=kv_parameters)
|
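# Aside: a hedged usage sketch for the executor above (Jina 2.x-era API; the dump
# path and the embedding dimensionality are assumptions, not from this file):
import numpy as np
from jina import Document, DocumentArray, Flow

queries = DocumentArray([Document(embedding=np.random.rand(128))])  # dim must match the index
flow = Flow().add(uses=FaissLMDBSearcher, uses_with={'dump_path': '/tmp/dump'})
with flow:
    flow.post(on='/search', inputs=queries)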
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
    This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
    It can be useful to start migrating a codebase from v1 to v2.
    Nevertheless, the API is not totally compatible with the DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
    of the data is similar.
```python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
    doc.chunks = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
    doc.matches = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
```
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocList[LegacyDocument]]
matches: Optional[DocList[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
    This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
    It can be useful to start migrating a codebase from v1 to v2.
    Nevertheless, the API is not totally compatible with the DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
    of the data is similar.
.. code-block:: python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
        doc.chunks = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
        doc.matches = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocList[LegacyDocument]]
matches: Optional[DocList[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Dict, Callable
import pytest
from jina import DocumentArray
from ...transform_encoder import TransformerTorchEncoder
MODELS_TO_TEST = [
'sentence-transformers/distilbert-base-nli-stsb-mean-tokens',
'bert-base-uncased',
'distilroberta-base',
'distilbert-base-cased-distilled-squad',
]
@pytest.mark.parametrize(
'model_name', MODELS_TO_TEST
)
def test_load_torch_models(model_name: str, data_generator: Callable):
encoder = TransformerTorchEncoder(pretrained_model_name_or_path=model_name)
docs = DocumentArray([doc for doc in data_generator()])
encoder.encode(
docs=docs,
parameters={}
)
for doc in docs:
assert doc.embedding is not None
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Dict, Callable
import pytest
from jina import DocumentArray
from jinahub.encoder.transform_encoder import TransformerTorchEncoder
MODELS_TO_TEST = [
'sentence-transformers/distilbert-base-nli-stsb-mean-tokens',
'bert-base-uncased',
'distilroberta-base',
'distilbert-base-cased-distilled-squad',
]
@pytest.mark.parametrize(
'model_name', MODELS_TO_TEST
)
def test_load_torch_models(model_name: str, data_generator: Callable):
encoder = TransformerTorchEncoder(pretrained_model_name_or_path=model_name)
docs = DocumentArray([doc for doc in data_generator()])
encoder.encode(
docs=docs,
parameters={}
)
for doc in docs:
assert doc.embedding is not None
|
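# Aside: `data_generator` above is a pytest fixture defined elsewhere (typically in a
# conftest.py). A plausible sketch of its shape, purely an assumption:
from jina import Document

def data_generator(num_docs: int = 5):
    for i in range(num_docs):
        yield Document(text=f"sentence number {i}")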
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return Config().database_api_port
@expose
def send_execution_update(self, execution_result_dict: dict[Any, Any]):
self.event_queue.publish(ExecutionResult(**execution_result_dict))
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import get_user_metadata, update_user_metadata
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return Config().database_api_port
@expose
def send_execution_update(self, execution_result_dict: dict[Any, Any]):
self.event_queue.publish(ExecutionResult(**execution_result_dict))
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
|
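# Aside: a self-contained sketch of the `exposed_run_and_wait` pattern above, with a
# stand-in for AppService so it runs without the backend.* imports (all names below
# are illustrative, not the real service API):
import asyncio
from functools import wraps
from typing import Any, Callable, Coroutine, TypeVar

R = TypeVar("R")

class MiniService:
    def run_and_wait(self, coroutine: Coroutine[Any, Any, R]) -> R:
        # Stand-in for AppService.run_and_wait: block until the coroutine finishes.
        return asyncio.run(coroutine)

    @staticmethod
    def run_and_wait_wrapper(f: Callable[..., Coroutine[Any, Any, R]]) -> Callable[..., R]:
        @wraps(f)
        def wrapper(self, *args, **kwargs) -> R:
            return self.run_and_wait(f(*args, **kwargs))
        return wrapper

async def fetch_value(key: str) -> str:
    return f"value-for-{key}"

class Demo(MiniService):
    # A module-level coroutine becomes a synchronous method on the service class.
    fetch_value = MiniService.run_and_wait_wrapper(fetch_value)

assert Demo().fetch_value("k") == "value-for-k"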
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
LESS_THAN_OR_EQUAL = "<="
class ConditionBlock(Block):
class Input(BlockSchema):
value1: Any = SchemaField(
description="Enter the first value for comparison",
placeholder="For example: 10 or 'hello' or True",
)
operator: ComparisonOperator = SchemaField(
description="Choose the comparison operator",
placeholder="Select an operator",
)
value2: Any = SchemaField(
description="Enter the second value for comparison",
placeholder="For example: 20 or 'world' or False",
)
yes_value: Any = SchemaField(
description="(Optional) Value to output if the condition is true. If not provided, value1 will be used.",
placeholder="Leave empty to use value1, or enter a specific value",
default=None,
)
no_value: Any = SchemaField(
description="(Optional) Value to output if the condition is false. If not provided, value1 will be used.",
placeholder="Leave empty to use value1, or enter a specific value",
default=None,
)
class Output(BlockSchema):
result: bool = SchemaField(
description="The result of the condition evaluation (True or False)"
)
yes_output: Any = SchemaField(
description="The output value if the condition is true"
)
no_output: Any = SchemaField(
description="The output value if the condition is false"
)
def __init__(self):
super().__init__(
id="715696a0-e1da-45c8-b209-c2fa9c3b0be6",
input_schema=ConditionBlock.Input,
output_schema=ConditionBlock.Output,
description="Handles conditional logic based on comparison operators",
categories={BlockCategory.LOGIC},
test_input={
"value1": 10,
"operator": ComparisonOperator.GREATER_THAN.value,
"value2": 5,
"yes_value": "Greater",
"no_value": "Not greater",
},
test_output=[
("result", True),
("yes_output", "Greater"),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
operator = input_data.operator
value1 = input_data.value1
if isinstance(value1, str):
value1 = float(value1.strip())
value2 = input_data.value2
if isinstance(value2, str):
value2 = float(value2.strip())
yes_value = input_data.yes_value if input_data.yes_value is not None else value1
no_value = input_data.no_value if input_data.no_value is not None else value2
comparison_funcs = {
ComparisonOperator.EQUAL: lambda a, b: a == b,
ComparisonOperator.NOT_EQUAL: lambda a, b: a != b,
ComparisonOperator.GREATER_THAN: lambda a, b: a > b,
ComparisonOperator.LESS_THAN: lambda a, b: a < b,
ComparisonOperator.GREATER_THAN_OR_EQUAL: lambda a, b: a >= b,
ComparisonOperator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
}
result = comparison_funcs[operator](value1, value2)
yield "result", result
if result:
yield "yes_output", yes_value
else:
yield "no_output", no_value
|
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
LESS_THAN_OR_EQUAL = "<="
class ConditionBlock(Block):
class Input(BlockSchema):
value1: Any = SchemaField(
description="Enter the first value for comparison",
placeholder="For example: 10 or 'hello' or True",
)
operator: ComparisonOperator = SchemaField(
description="Choose the comparison operator",
placeholder="Select an operator",
)
value2: Any = SchemaField(
description="Enter the second value for comparison",
placeholder="For example: 20 or 'world' or False",
)
yes_value: Any = SchemaField(
description="(Optional) Value to output if the condition is true. If not provided, value1 will be used.",
placeholder="Leave empty to use value1, or enter a specific value",
default=None,
)
no_value: Any = SchemaField(
description="(Optional) Value to output if the condition is false. If not provided, value1 will be used.",
placeholder="Leave empty to use value1, or enter a specific value",
default=None,
)
class Output(BlockSchema):
result: bool = SchemaField(
description="The result of the condition evaluation (True or False)"
)
yes_output: Any = SchemaField(
description="The output value if the condition is true"
)
no_output: Any = SchemaField(
description="The output value if the condition is false"
)
def __init__(self):
super().__init__(
id="715696a0-e1da-45c8-b209-c2fa9c3b0be6",
input_schema=ConditionBlock.Input,
output_schema=ConditionBlock.Output,
description="Handles conditional logic based on comparison operators",
categories={BlockCategory.LOGIC},
test_input={
"value1": 10,
"operator": ComparisonOperator.GREATER_THAN.value,
"value2": 5,
"yes_value": "Greater",
"no_value": "Not greater",
},
test_output=[
("result", True),
("yes_output", "Greater"),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
value1 = input_data.value1
operator = input_data.operator
value2 = input_data.value2
yes_value = input_data.yes_value if input_data.yes_value is not None else value1
no_value = input_data.no_value if input_data.no_value is not None else value1
comparison_funcs = {
ComparisonOperator.EQUAL: lambda a, b: a == b,
ComparisonOperator.NOT_EQUAL: lambda a, b: a != b,
ComparisonOperator.GREATER_THAN: lambda a, b: a > b,
ComparisonOperator.LESS_THAN: lambda a, b: a < b,
ComparisonOperator.GREATER_THAN_OR_EQUAL: lambda a, b: a >= b,
ComparisonOperator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
}
try:
result = comparison_funcs[operator](value1, value2)
yield "result", result
if result:
yield "yes_output", yes_value
else:
yield "no_output", no_value
except Exception:
yield "result", None
yield "yes_output", None
yield "no_output", None
|
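# Aside: the run() method above dispatches through a dict from enum members to
# comparison lambdas; the same pattern in isolation (names are illustrative):
from enum import Enum

class Op(Enum):
    GREATER_THAN = ">"
    LESS_THAN = "<"

_funcs = {Op.GREATER_THAN: lambda a, b: a > b, Op.LESS_THAN: lambda a, b: a < b}

def compare(op: Op, a, b) -> bool:
    return _funcs[op](a, b)  # look the operator up at call time

assert compare(Op.GREATER_THAN, 10, 5) is True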
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py',
'./retinanet_tta.py'
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_tar
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""*YesNo* :cite:`YesNo` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
_extract_tar(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
List[int]:
labels
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""*YesNo* :cite:`YesNo` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
List[int]:
labels
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
|
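# Aside: minimal usage of the dataset class above (the root directory is an
# assumption; download=True needs network access):
# dataset = YESNO("./data", download=True)
# waveform, sample_rate, labels = dataset[0]  # labels is a list of 0/1 ints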
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(PointRend, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(PointRend, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler'
]
|
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler'
]
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like.
"""
import gzip
import os
import tarfile
import sentence_transformers
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS use 2-letter language codes (ISO-639-1)
# For the training of sentence transformers, it doesn't matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(["eng"])
target_languages = set(["deu", "ara", "tur", "spa", "ita", "fra"])
num_dev_sentences = 1000 # Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, "sentences.tar.bz2")
sentences_file = os.path.join(tatoeba_folder, "sentences.csv")
links_file_bz2 = os.path.join(tatoeba_folder, "links.tar.bz2")
links_file = os.path.join(tatoeba_folder, "links.csv")
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
# Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url + os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
# Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract("sentences.csv", path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract("links.csv", path=tatoeba_folder)
tar.close()
# Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding="utf8") as fIn:
for line in fIn:
id, lang, sentence = line.strip().split("\t")
if lang in all_langs:
sentences[id] = (lang, sentence)
# Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding="utf8") as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
# Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-dev.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-train.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like.
"""
import os
import sentence_transformers
import tarfile
import gzip
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS use 2-letter language codes (ISO-639-1)
# For the training of sentence transformers, it doesn't matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(["eng"])
target_languages = set(["deu", "ara", "tur", "spa", "ita", "fra"])
num_dev_sentences = 1000 # Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, "sentences.tar.bz2")
sentences_file = os.path.join(tatoeba_folder, "sentences.csv")
links_file_bz2 = os.path.join(tatoeba_folder, "links.tar.bz2")
links_file = os.path.join(tatoeba_folder, "links.csv")
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
# Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url + os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
# Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract("sentences.csv", path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract("links.csv", path=tatoeba_folder)
tar.close()
# Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding="utf8") as fIn:
for line in fIn:
id, lang, sentence = line.strip().split("\t")
if lang in all_langs:
sentences[id] = (lang, sentence)
# Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding="utf8") as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
# Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-dev.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-train.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
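# Aside: the shape of the `translations` mapping built above, shown as a tiny worked
# example (the sentences are illustrative):
translations_example = {"eng": {"deu": {}}}
translations_example["eng"]["deu"].setdefault("Hello.", []).append("Hallo.")
# -> {'eng': {'deu': {'Hello.': ['Hallo.']}}}: source sentence -> list of translations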
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
actual = fn(x, y, mode)
expected = torch.stack(
[
torch.stack(
[fn(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0), mode).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, snr, lengths)
expected = []
for i in range(leading_dims[0]):
for j in range(leading_dims[1]):
for k in range(leading_dims[2]):
expected.append(F.add_noise(waveform[i][j][k], noise[i][j][k], snr[i][j][k], lengths[i][j][k]))
self.assertEqual(torch.stack(expected), actual.reshape(-1, L))
def test_speed(self):
B = 5
orig_freq = 100
factor = 0.8
input_lengths = torch.randint(1, 1000, (B,), dtype=torch.int32)
unbatched_input = [torch.ones((int(length),)) * 1.0 for length in input_lengths]
batched_input = torch.nn.utils.rnn.pad_sequence(unbatched_input, batch_first=True)
output, output_lengths = F.speed(batched_input, input_lengths, orig_freq=orig_freq, factor=factor)
unbatched_output = []
unbatched_output_lengths = []
for idx in range(len(unbatched_input)):
w, l = F.speed(unbatched_input[idx], input_lengths[idx], orig_freq=orig_freq, factor=factor)
unbatched_output.append(w)
unbatched_output_lengths.append(l)
self.assertEqual(output_lengths, torch.stack(unbatched_output_lengths))
for idx in range(len(unbatched_output)):
w, l = output[idx], output_lengths[idx]
self.assertEqual(unbatched_output[idx], w[:l])
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
actual = F.preemphasis(waveform, coeff=coeff)
expected = []
for i in range(waveform.size(0)):
expected.append(F.preemphasis(waveform[i], coeff=coeff))
self.assertEqual(torch.stack(expected), actual)
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
actual = F.deemphasis(waveform, coeff=coeff)
expected = []
for i in range(waveform.size(0)):
expected.append(F.deemphasis(waveform[i], coeff=coeff))
self.assertEqual(torch.stack(expected), actual)
|
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
actual = fn(x, y, mode)
expected = torch.stack(
[
torch.stack(
[fn(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0), mode).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
expected = []
for i in range(leading_dims[0]):
for j in range(leading_dims[1]):
for k in range(leading_dims[2]):
expected.append(F.add_noise(waveform[i][j][k], noise[i][j][k], lengths[i][j][k], snr[i][j][k]))
self.assertEqual(torch.stack(expected), actual.reshape(-1, L))
def test_speed(self):
B = 5
orig_freq = 100
factor = 0.8
input_lengths = torch.randint(1, 1000, (B,), dtype=torch.int32)
unbatched_input = [torch.ones((int(length),)) * 1.0 for length in input_lengths]
batched_input = torch.nn.utils.rnn.pad_sequence(unbatched_input, batch_first=True)
output, output_lengths = F.speed(batched_input, input_lengths, orig_freq=orig_freq, factor=factor)
unbatched_output = []
unbatched_output_lengths = []
for idx in range(len(unbatched_input)):
w, l = F.speed(unbatched_input[idx], input_lengths[idx], orig_freq=orig_freq, factor=factor)
unbatched_output.append(w)
unbatched_output_lengths.append(l)
self.assertEqual(output_lengths, torch.stack(unbatched_output_lengths))
for idx in range(len(unbatched_output)):
w, l = output[idx], output_lengths[idx]
self.assertEqual(unbatched_output[idx], w[:l])
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
actual = F.preemphasis(waveform, coeff=coeff)
expected = []
for i in range(waveform.size(0)):
expected.append(F.preemphasis(waveform[i], coeff=coeff))
self.assertEqual(torch.stack(expected), actual)
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
actual = F.deemphasis(waveform, coeff=coeff)
expected = []
for i in range(waveform.size(0)):
expected.append(F.deemphasis(waveform[i], coeff=coeff))
self.assertEqual(torch.stack(expected), actual)
|
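# Aside: the generic pattern these tests follow: run an op once on the whole batch and
# once per item, then require identical results. A sketch with a stand-in op:
import torch

def op(x):  # stand-in for F.preemphasis and friends
    return x * 2.0

batch = torch.rand(4, 100)
assert torch.equal(op(batch), torch.stack([op(batch[i]) for i in range(4)]))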
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromFile,
LoadImageFromWebcam, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'FilterAnnotations',
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform',
'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform',
'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine',
'YOLOXHSVRandomAug', 'CopyPaste'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste'
]
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
    The latter is a string naming the key of the primary metric, i.e. the metric used for model
    selection and/or logging. It has to be defined whenever the evaluator returns a dictionary of
    metrics.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
        if getattr(self, "primary_metric", None) and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError, not IndexError, when the substring is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
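# A minimal sketch of extending SentenceEvaluator, as the class docstring
# suggests. The accuracy value below is a hypothetical placeholder; a real
# evaluator would score the model on actual evaluation data.
class DummyAccuracyEvaluator(SentenceEvaluator):
    def __init__(self, name: str = ""):
        super().__init__()
        self.name = name
        self.primary_metric = "accuracy"
    def __call__(self, model, output_path=None, epoch=-1, steps=-1):
        metrics = {"accuracy": 1.0}  # placeholder score
        # Prefix the metric keys with the evaluator name and update primary_metric accordingly
        metrics = self.prefix_name_to_metrics(metrics, self.name)
        self.store_metrics_in_model_card_data(model, metrics, epoch, steps)
        return metrics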
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
        The latter is a string naming the key of the primary metric, i.e. the metric used for model
        selection and/or logging. It has to be defined whenever the evaluator returns a dictionary of
        metrics.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
        if getattr(self, "primary_metric", None) and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError, not IndexError, when the substring is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
"""
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize the
Logistic Regression loss with a non-smooth, sparsity-inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True, which means that the coefficients of one model are
reused to initialize the next model fit, speeding up the computation of the
full path.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Load data
# ---------
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
feature_names = iris.feature_names
# %%
# Here we remove the third class to reduce the problem to binary classification
X = X[y != 2]
y = y[y != 2]
# %%
# Compute regularization path
# ---------------------------
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import l1_min_c
cs = l1_min_c(X, y, loss="log") * np.logspace(0, 1, 16)
# %%
# Create a pipeline with `StandardScaler` and `LogisticRegression`, to normalize
# the data before fitting a linear model, in order to speed up convergence and
# make the coefficients comparable. Also, as a side effect, since the data is now
# centered around 0, we don't need to fit an intercept.
clf = make_pipeline(
StandardScaler(),
LogisticRegression(
penalty="l1",
solver="liblinear",
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
fit_intercept=False,
),
)
coefs_ = []
for c in cs:
clf.set_params(logisticregression__C=c)
clf.fit(X, y)
coefs_.append(clf["logisticregression"].coef_.ravel().copy())
coefs_ = np.array(coefs_)
# %%
# Plot regularization path
# ------------------------
import matplotlib.pyplot as plt
# Colorblind-friendly palette (IBM Color Blind Safe palette)
colors = ["#648FFF", "#785EF0", "#DC267F", "#FE6100"]
plt.figure(figsize=(10, 6))
for i in range(coefs_.shape[1]):
plt.semilogx(cs, coefs_[:, i], marker="o", color=colors[i], label=feature_names[i])
ymin, ymax = plt.ylim()
plt.xlabel("C")
plt.ylabel("Coefficients")
plt.title("Logistic Regression Path")
plt.legend()
plt.axis("tight")
plt.show()
|
"""
==============================================
Regularization path of L1- Logistic Regression
==============================================
Train l1-penalized logistic regression models on a binary classification
problem derived from the Iris dataset.
The models are ordered from strongest regularized to least regularized. The 4
coefficients of the models are collected and plotted as a "regularization
path": on the left-hand side of the figure (strong regularizers), all the
coefficients are exactly 0. When regularization gets progressively looser,
coefficients can get non-zero values one after the other.
Here we choose the liblinear solver because it can efficiently optimize the
Logistic Regression loss with a non-smooth, sparsity-inducing l1 penalty.
Also note that we set a low value for the tolerance to make sure that the model
has converged before collecting the coefficients.
We also use warm_start=True, which means that the coefficients of one model are
reused to initialize the next model fit, speeding up the computation of the
full path.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Load data
# ---------
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X /= X.max()  # Normalize X to speed up convergence
# %%
# Compute regularization path
# ---------------------------
import numpy as np
from sklearn import linear_model
from sklearn.svm import l1_min_c
cs = l1_min_c(X, y, loss="log") * np.logspace(0, 10, 16)
clf = linear_model.LogisticRegression(
penalty="l1",
solver="liblinear",
tol=1e-6,
max_iter=int(1e6),
warm_start=True,
intercept_scaling=10000.0,
)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
coefs_ = np.array(coefs_)
# %%
# Plot regularization path
# ------------------------
import matplotlib.pyplot as plt
plt.plot(np.log10(cs), coefs_, marker="o")
ymin, ymax = plt.ylim()
plt.xlabel("log(C)")
plt.ylabel("Coefficients")
plt.title("Logistic Regression Path")
plt.axis("tight")
plt.show()
|
"""
LocalAI is a free, open source, and self-hosted OpenAI alternative.
Docs: https://localai.io/
Source: https://github.com/go-skynet/LocalAI
"""
import warnings
from types import MappingProxyType
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import is_function_calling_model
from llama_index.llms.openai_like import OpenAILike
# Use these as kwargs for OpenAILike to connect to LocalAI
DEFAULT_LOCALAI_PORT = 8080
# TODO: move to MappingProxyType[str, Any] once Python 3.9+
LOCALAI_DEFAULTS: Dict[str, Any] = MappingProxyType( # type: ignore[assignment]
{
"api_key": "localai_fake",
"api_type": "localai_fake",
"api_base": f"http://localhost:{DEFAULT_LOCALAI_PORT}/v1",
}
)
class LocalAI(OpenAI):
"""
LocalAI LLM class.
Examples:
`pip install llama-index-llms-localai`
```python
from llama_index.llms.localai import LocalAI
llm = LocalAI(api_base="http://localhost:8080/v1")
response = llm.complete("Hello!")
print(str(response))
```
"""
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
globally_use_chat_completions: Optional[bool] = Field(
default=None,
description=(
"Set None (default) to per-invocation decide on using /chat/completions"
" vs /completions endpoints with query keyword arguments,"
" set False to universally use /completions endpoint,"
" set True to universally use /chat/completions endpoint."
),
)
def __init__(
self,
api_key: Optional[str] = LOCALAI_DEFAULTS["api_key"],
api_base: Optional[str] = LOCALAI_DEFAULTS["api_base"],
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
super().__init__(
api_key=api_key,
api_base=api_base,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
warnings.warn(
(
f"{type(self).__name__} subclass is deprecated in favor of"
f" {OpenAILike.__name__} composition. The deprecation cycle"
" will complete sometime in late December 2023."
),
DeprecationWarning,
stacklevel=2,
)
@classmethod
def class_name(cls) -> str:
return "LocalAI"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens or -1,
is_chat_model=self._is_chat_model,
is_function_calling_model=is_function_calling_model(
model=self._get_model_name()
),
model_name=self.model,
)
def _update_max_tokens(self, all_kwargs: Dict[str, Any], prompt: str) -> None:
# This subclass only supports max_tokens via LocalAI(..., max_tokens=123)
del all_kwargs, prompt # Unused
# do nothing
@property
def _is_chat_model(self) -> bool:
if self.globally_use_chat_completions is not None:
return self.globally_use_chat_completions
raise NotImplementedError(
"Inferring of when to use /chat/completions is unsupported by"
f" {type(self).__name__}. Please either set 'globally_use_chat_completions'"
" arg during construction, or pass the arg 'use_chat_completions' in your"
" query, setting True for /chat/completions or False for /completions."
)
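# A minimal construction sketch (endpoint and model name are hypothetical).
# Setting ``globally_use_chat_completions`` avoids the NotImplementedError that
# ``_is_chat_model`` raises when the endpoint choice cannot be inferred.
if __name__ == "__main__":
    llm = LocalAI(
        model="ggml-gpt4all-j",
        api_base="http://localhost:8080/v1",
        globally_use_chat_completions=True,
    )
    print(llm.metadata.is_chat_model)  # True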
|
"""
LocalAI is a free, open source, and self-hosted OpenAI alternative.
Docs: https://localai.io/
Source: https://github.com/go-skynet/LocalAI
"""
import warnings
from types import MappingProxyType
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.core.base.llms.types import ChatMessage, LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW
from llama_index.core.types import BaseOutputParser, PydanticProgramMode
from llama_index.llms.openai import OpenAI
from llama_index.llms.openai.utils import is_function_calling_model
from llama_index.llms.openai_like import OpenAILike
# Use these as kwargs for OpenAILike to connect to LocalAI
DEFAULT_LOCALAI_PORT = 8080
# TODO: move to MappingProxyType[str, Any] once Python 3.9+
LOCALAI_DEFAULTS: Dict[str, Any] = MappingProxyType( # type: ignore[assignment]
{
"api_key": "localai_fake",
"api_type": "localai_fake",
"api_base": f"http://localhost:{DEFAULT_LOCALAI_PORT}/v1",
}
)
class LocalAI(OpenAI):
"""LocalAI LLM class.
Examples:
`pip install llama-index-llms-localai`
```python
from llama_index.llms.localai import LocalAI
llm = LocalAI(api_base="http://localhost:8080/v1")
response = llm.complete("Hello!")
print(str(response))
```
"""
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
globally_use_chat_completions: Optional[bool] = Field(
default=None,
description=(
"Set None (default) to per-invocation decide on using /chat/completions"
" vs /completions endpoints with query keyword arguments,"
" set False to universally use /completions endpoint,"
" set True to universally use /chat/completions endpoint."
),
)
def __init__(
self,
api_key: Optional[str] = LOCALAI_DEFAULTS["api_key"],
api_base: Optional[str] = LOCALAI_DEFAULTS["api_base"],
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
super().__init__(
api_key=api_key,
api_base=api_base,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
warnings.warn(
(
f"{type(self).__name__} subclass is deprecated in favor of"
f" {OpenAILike.__name__} composition. The deprecation cycle"
" will complete sometime in late December 2023."
),
DeprecationWarning,
stacklevel=2,
)
@classmethod
def class_name(cls) -> str:
return "LocalAI"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens or -1,
is_chat_model=self._is_chat_model,
is_function_calling_model=is_function_calling_model(
model=self._get_model_name()
),
model_name=self.model,
)
def _update_max_tokens(self, all_kwargs: Dict[str, Any], prompt: str) -> None:
# This subclass only supports max_tokens via LocalAI(..., max_tokens=123)
del all_kwargs, prompt # Unused
# do nothing
@property
def _is_chat_model(self) -> bool:
if self.globally_use_chat_completions is not None:
return self.globally_use_chat_completions
raise NotImplementedError(
"Inferring of when to use /chat/completions is unsupported by"
f" {type(self).__name__}. Please either set 'globally_use_chat_completions'"
" arg during construction, or pass the arg 'use_chat_completions' in your"
" query, setting True for /chat/completions or False for /completions."
)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
|
from typing import Union
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor as ImageTFTensor,
)
ImageTensor = Union[ImageNdArray] # type: ignore
if tf_available and torch_available:
ImageTensor = Union[ImageNdArray, ImageTorchTensor, ImageTFTensor] # type: ignore
elif tf_available:
ImageTensor = Union[ImageNdArray, ImageTFTensor] # type: ignore
elif torch_available:
ImageTensor = Union[ImageNdArray, ImageTorchTensor] # type: ignore
|
from typing import Union
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.image.image_tensorflow_tensor import (
ImageTensorFlowTensor as ImageTFTensor,
)
ImageTensor = Union[ImageNdArray] # type: ignore
if tf_available and torch_available:
ImageTensor = Union[ImageNdArray, ImageTorchTensor, ImageTFTensor] # type: ignore
elif tf_available:
ImageTensor = Union[ImageNdArray, ImageTFTensor] # type: ignore
elif torch_available:
ImageTensor = Union[ImageNdArray, ImageTorchTensor] # type: ignore
|
from typing import TYPE_CHECKING, Union, BinaryIO
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blob(self: 'T') -> 'T':
"""Convert :attr:`.uri` to :attr:`.blob` inplace.
Internally it downloads from the URI and set :attr:`blob`.
:return: itself after processed
"""
self.blob = _uri_to_blob(self.uri)
return self
def convert_blob_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.blob` to data :attr:`.uri` in place.
Internally it first reads into blob and then converts it to data URI.
:param charset: charset may be any character set registered with IANA
        :param base64: whether to encode the data with base64, which maps arbitrary octet sequences into a
            form that satisfies the rules of 7bit encoding. Designed to be efficient for non-text 8 bit and
            binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
:return: itself after processed
"""
if not self.mime_type:
raise ValueError(
f'{self.mime_type} is unset, can not convert it to data uri'
)
self.uri = _to_datauri(self.mime_type, self.blob, charset, base64, binary=True)
return self
def save_blob_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.blob` into a file
:param file: File or filename to which the data is saved.
:return: itself after processed
"""
fp = _get_file_context(file)
with fp:
fp.write(self.blob)
return self
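# A minimal usage sketch (assumes a docarray ``Document``, which mixes this
# class in; the URI below is hypothetical):
#
#     from docarray import Document
#     doc = Document(uri='https://example.com/sample.png')
#     doc.load_uri_to_blob()            # download the bytes into .blob
#     doc.save_blob_to_file('sample.png')
#     doc.convert_blob_to_datauri()     # .uri becomes a data URI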
|
from typing import TYPE_CHECKING, Union, BinaryIO
from .helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from ...typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blob(self: 'T') -> 'T':
"""Convert :attr:`.uri` to :attr:`.blob` inplace.
Internally it downloads from the URI and set :attr:`blob`.
:return: itself after processed
"""
self.blob = _uri_to_blob(self.uri)
return self
def convert_blob_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.blob` to data :attr:`.uri` in place.
Internally it first reads into blob and then converts it to data URI.
:param charset: charset may be any character set registered with IANA
        :param base64: whether to encode the data with base64, which maps arbitrary octet sequences into a
            form that satisfies the rules of 7bit encoding. Designed to be efficient for non-text 8 bit and
            binary data. Sometimes used for text data that frequently uses non-US-ASCII characters.
:return: itself after processed
"""
if not self.mime_type:
raise ValueError(
f'{self.mime_type} is unset, can not convert it to data uri'
)
self.uri = _to_datauri(self.mime_type, self.blob, charset, base64, binary=True)
return self
def save_blob_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.blob` into a file
:param file: File or filename to which the data is saved.
:return: itself after processed
"""
fp = _get_file_context(file)
with fp:
fp.write(self.blob)
return self
|
from pathlib import Path
from typing import List
import numpy as np
import pytest
import scipy
from jina import Document, DocumentArray, Executor
from jina.excepts import PretrainedModelFileDoesNotExist
from ...tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.fixture(scope='session')
def basic_encoder() -> TFIDFTextEncoder:
return TFIDFTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'TFIDFTextEncoder'
def test_error_no_file():
with pytest.raises(PretrainedModelFileDoesNotExist):
TFIDFTextEncoder(path_vectorizer='does/not/exist')
def test_no_document(basic_encoder: TFIDFTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: TFIDFTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_tfidf_text_encoder(basic_encoder: TFIDFTextEncoder):
doc = Document(text='Han likes eating pizza')
docarray = DocumentArray([doc])
basic_encoder.encode(docarray, parameters={})
embedding = doc.embedding
assert embedding.shape == (1, _EMBEDDING_DIM)
assert embedding.size == 4
def test_tfidf_text_encoder_batch(basic_encoder: TFIDFTextEncoder):
# Input
text_batch = ['Han likes eating pizza', 'Han likes pizza', 'Jina rocks']
# Encoder embedding
docarray = DocumentArray([Document(text=text) for text in text_batch])
basic_encoder.encode(docarray, parameters={})
    embedding_batch = scipy.sparse.vstack(docarray.get_attributes('embedding'))
    assert embedding_batch.shape == (3, _EMBEDDING_DIM)
    assert embedding_batch.size == 8
    embs = np.asarray(embedding_batch.todense())
# They overlap in Han
assert (embs[0] * embs[1]).sum() > 0.1
# They do not overlap
assert (embs[0] * embs[2]).sum() == 0
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: TFIDFTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: TFIDFTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (1, _EMBEDDING_DIM)
|
from pathlib import Path
import numpy as np
import scipy
from jina import Document, DocumentArray, Executor
from ...tfidf_text_executor import TFIDFTextEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.path_vectorizer.endswith('tfidf_vectorizer.pickle')
def test_tfidf_text_encoder():
text = 'Han likes eating pizza'
encoder = TFIDFTextEncoder()
doc = Document(text=text)
docarray = DocumentArray([doc])
encoder.encode(docarray, parameters={})
embedding = doc.embedding
expected = scipy.sparse.load_npz(Path(__file__).parent / 'expected.npz')
np.testing.assert_almost_equal(embedding.todense(), expected.todense(), decimal=4)
assert expected.shape[0] == 1
def test_tfidf_text_encoder_batch():
# Input
text_batch = ['Han likes eating pizza', 'Han likes pizza', 'Jina rocks']
# Encoder embedding
encoder = TFIDFTextEncoder()
doc0 = Document(text=text_batch[0])
doc1 = Document(text=text_batch[1])
doc2 = Document(text=text_batch[2])
docarray = DocumentArray([doc0, doc1, doc2])
encoder.encode(docarray, parameters={})
    embedding_batch = scipy.sparse.vstack(docarray.get_attributes('embedding'))
    # Compare with expected output
    expected_batch = scipy.sparse.load_npz(Path(__file__).parent / 'expected_batch.npz')
    np.testing.assert_almost_equal(
        embedding_batch.todense(), expected_batch.todense(), decimal=2
)
assert expected_batch.shape[0] == len(text_batch)
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Query Sparsity: Active Dimensions: 63.0, Sparsity Ratio: 0.9979
Model Corpus Sparsity: Active Dimensions: 63.4, Sparsity Ratio: 0.9979
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 48.1, Sparsity Ratio: 0.9984
Model Corpus Sparsity: Active Dimensions: 125.4, Sparsity Ratio: 0.9959
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Query Sparsity: Active Dimensions: 55.5, Sparsity Ratio: 0.9982
Model Corpus Sparsity: Active Dimensions: 94.4, Sparsity Ratio: 0.9969
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Sparsity Stats Query : Row Non-Zero Mean: 62.97999954223633, Row Sparsity Mean: 0.9979365468025208
Model Sparsity Stats Corpus : Row Non-Zero Mean: 63.39932632446289, Row Sparsity Mean: 0.9979228377342224
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Sparsity Stats Query : Row Non-Zero Mean: 48.08000183105469, Row Sparsity Mean: 0.9984247088432312
Model Sparsity Stats Corpus : Row Non-Zero Mean: 125.3604965209961, Row Sparsity Mean: 0.9958928227424622
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Sparsity Stats Query : Row Non-Zero Mean: 55.53000068664551, Row Sparsity Mean: 0.998180627822876
Model Sparsity Stats Corpus : Row Non-Zero Mean: 94.37991142272949, Row Sparsity Mean: 0.9969078302383423
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
"""Google Finance API Toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleFinanceQueryRun": "langchain_community.tools.google_finance.tool",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleFinanceQueryRun",
]
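# A minimal usage sketch: resolving the deprecated name goes through the
# module-level __getattr__ above, which emits a deprecation warning before
# re-exporting the class from langchain_community (assumed to be installed).
if __name__ == "__main__":
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        tool_cls = __getattr__("GoogleFinanceQueryRun")
    print(tool_cls.__name__)  # GoogleFinanceQueryRun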
|
"""Google Finance API Toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleFinanceQueryRun": "langchain_community.tools.google_finance.tool"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleFinanceQueryRun",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import add_dump_metric, find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'add_dump_metric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .ssh import SSH
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead', 'CSPNeXtPAFPN', 'SSH'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .cspnext_pafpn import CSPNeXtPAFPN
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead', 'CSPNeXtPAFPN'
]
|
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import PYDANTIC_MAJOR_VERSION
if PYDANTIC_MAJOR_VERSION < 2:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
elif issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
else:
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
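# A minimal usage sketch: parsing a hypothetical LLM output that wraps JSON in
# a markdown code fence, which parse_json_markdown handles.
if __name__ == "__main__":
    parser = JsonOutputParser()
    llm_output = '```json\n{"name": "Ada", "score": 0.9}\n```'
    print(parser.parse(llm_output))  # -> {'name': 'Ada', 'score': 0.9}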
|
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import PYDANTIC_MAJOR_VERSION
if PYDANTIC_MAJOR_VERSION < 2:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
elif issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
else:
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
parse_partial_json = parse_partial_json
parse_and_check_json_markdown = parse_and_check_json_markdown
|
import torch
from torchaudio.models import emformer_rnnt_model, RNNTBeamSearch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class RNNTBeamSearchTestImpl(TestBaseMixin):
def _get_input_config(self):
model_config = self._get_model_config()
return {
"batch_size": 1,
"max_input_length": 61,
"num_symbols": model_config["num_symbols"],
"input_dim": model_config["input_dim"],
"right_context_length": model_config["right_context_length"],
"segment_length": model_config["segment_length"],
}
def _get_model_config(self):
return {
"input_dim": 80,
"encoding_dim": 128,
"num_symbols": 256,
"segment_length": 16,
"right_context_length": 4,
"time_reduction_input_dim": 128,
"time_reduction_stride": 4,
"transformer_num_heads": 4,
"transformer_ffn_dim": 64,
"transformer_num_layers": 3,
"transformer_dropout": 0.0,
"transformer_activation": "relu",
"transformer_left_context_length": 30,
"transformer_max_memory_size": 0,
"transformer_weight_init_scale_strategy": "depthwise",
"transformer_tanh_on_mem": True,
"symbol_embedding_dim": 64,
"num_lstm_layers": 2,
"lstm_layer_norm": True,
"lstm_layer_norm_epsilon": 1e-3,
"lstm_dropout": 0.0,
}
def _get_model(self):
return emformer_rnnt_model(**self._get_model_config()).to(device=self.device, dtype=self.dtype).eval()
def test_torchscript_consistency_forward(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `forward`."""
input_config = self._get_input_config()
batch_size = input_config["batch_size"]
max_input_length = input_config["max_input_length"]
right_context_length = input_config["right_context_length"]
input_dim = input_config["input_dim"]
num_symbols = input_config["num_symbols"]
blank_idx = num_symbols - 1
beam_width = 5
input = torch.rand(batch_size, max_input_length + right_context_length, input_dim).to(
device=self.device, dtype=self.dtype
)
lengths = torch.randint(1, max_input_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)
model = self._get_model()
beam_search = RNNTBeamSearch(model, blank_idx)
scripted = torch_script(beam_search)
res = beam_search(input, lengths, beam_width)
scripted_res = scripted(input, lengths, beam_width)
self.assertEqual(res, scripted_res)
def test_torchscript_consistency_infer(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `infer`."""
input_config = self._get_input_config()
segment_length = input_config["segment_length"]
right_context_length = input_config["right_context_length"]
input_dim = input_config["input_dim"]
num_symbols = input_config["num_symbols"]
blank_idx = num_symbols - 1
beam_width = 5
input = torch.rand(segment_length + right_context_length, input_dim).to(device=self.device, dtype=self.dtype)
lengths = torch.randint(1, segment_length + right_context_length + 1, ()).to(
device=self.device, dtype=torch.int32
)
model = self._get_model()
state, hypo = None, None
scripted_state, scripted_hypo = None, None
for _ in range(2):
beam_search = RNNTBeamSearch(model, blank_idx)
scripted = torch_script(beam_search)
res = beam_search.infer(input, lengths, beam_width, state=state, hypothesis=hypo)
scripted_res = scripted.infer(input, lengths, beam_width, state=scripted_state, hypothesis=scripted_hypo)
self.assertEqual(res, scripted_res)
state = res[1]
hypo = res[0][0]
scripted_state = scripted_res[1]
scripted_hypo = scripted_res[0][0]
|
import torch
from torchaudio.models import emformer_rnnt_model, RNNTBeamSearch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class RNNTBeamSearchTestImpl(TestBaseMixin):
def _get_input_config(self):
model_config = self._get_model_config()
return {
"batch_size": 1,
"max_input_length": 61,
"num_symbols": model_config["num_symbols"],
"input_dim": model_config["input_dim"],
"right_context_length": model_config["right_context_length"],
"segment_length": model_config["segment_length"],
}
def _get_model_config(self):
return {
"input_dim": 80,
"encoding_dim": 128,
"num_symbols": 256,
"segment_length": 16,
"right_context_length": 4,
"time_reduction_input_dim": 128,
"time_reduction_stride": 4,
"transformer_num_heads": 4,
"transformer_ffn_dim": 64,
"transformer_num_layers": 3,
"transformer_dropout": 0.0,
"transformer_activation": "relu",
"transformer_left_context_length": 30,
"transformer_max_memory_size": 0,
"transformer_weight_init_scale_strategy": "depthwise",
"transformer_tanh_on_mem": True,
"symbol_embedding_dim": 64,
"num_lstm_layers": 2,
"lstm_layer_norm": True,
"lstm_layer_norm_epsilon": 1e-3,
"lstm_dropout": 0.0,
}
def _get_model(self):
return emformer_rnnt_model(**self._get_model_config()).to(device=self.device, dtype=self.dtype).eval()
def test_torchscript_consistency_forward(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `forward`."""
torch.random.manual_seed(31)
input_config = self._get_input_config()
batch_size = input_config["batch_size"]
max_input_length = input_config["max_input_length"]
right_context_length = input_config["right_context_length"]
input_dim = input_config["input_dim"]
num_symbols = input_config["num_symbols"]
blank_idx = num_symbols - 1
beam_width = 5
input = torch.rand(batch_size, max_input_length + right_context_length, input_dim).to(
device=self.device, dtype=self.dtype
)
lengths = torch.randint(1, max_input_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)
model = self._get_model()
beam_search = RNNTBeamSearch(model, blank_idx)
scripted = torch_script(beam_search)
res = beam_search(input, lengths, beam_width)
scripted_res = scripted(input, lengths, beam_width)
self.assertEqual(res, scripted_res)
def test_torchscript_consistency_infer(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `infer`."""
torch.random.manual_seed(31)
input_config = self._get_input_config()
segment_length = input_config["segment_length"]
right_context_length = input_config["right_context_length"]
input_dim = input_config["input_dim"]
num_symbols = input_config["num_symbols"]
blank_idx = num_symbols - 1
beam_width = 5
input = torch.rand(segment_length + right_context_length, input_dim).to(device=self.device, dtype=self.dtype)
lengths = torch.randint(1, segment_length + right_context_length + 1, ()).to(
device=self.device, dtype=torch.int32
)
model = self._get_model()
state, hypo = None, None
scripted_state, scripted_hypo = None, None
for _ in range(2):
beam_search = RNNTBeamSearch(model, blank_idx)
scripted = torch_script(beam_search)
res = beam_search.infer(input, lengths, beam_width, state=state, hypothesis=hypo)
scripted_res = scripted.infer(input, lengths, beam_width, state=scripted_state, hypothesis=scripted_hypo)
self.assertEqual(res, scripted_res)
state = res[1]
hypo = res[0][0]
scripted_state = scripted_res[1]
scripted_hypo = scripted_res[0][0]
|
from __future__ import annotations
from typing import Callable
try:
from typing import Self
except ImportError:
from typing_extensions import Self
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
from sentence_transformers.util import fullname, import_from_string
class Dense(Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
        activation_function: PyTorch activation function applied to the
            output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
config_keys: list[str] = [
"in_features",
"out_features",
"bias",
"activation_function",
]
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
config["activation_function"] = import_from_string(config["activation_function"])()
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
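# A minimal usage sketch (not part of the original module): the 768 -> 256
# sizes and the random input are illustrative assumptions. It shows how this
# Dense module maps a pooled sentence embedding to a smaller dimension,
# updating the features dict in place.
if __name__ == "__main__":
    import torch

    layer = Dense(in_features=768, out_features=256)
    features = {"sentence_embedding": torch.rand(2, 768)}
    features = layer(features)
    assert features["sentence_embedding"].shape == (2, 256)
    assert layer.get_sentence_embedding_dimension() == 256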
|
from __future__ import annotations
import json
import os
from typing import Callable
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
        activation_function: PyTorch activation function applied to the
            output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, ImageUrl
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray import Image
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument, Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
"""
url: Optional[ImageUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyTensor, Embedding, ImageUrl
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an Embedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray import Image
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[Embedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument, Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
"""
url: Optional[ImageUrl]
tensor: Optional[AnyTensor]
embedding: Optional[Embedding]
|
from pathlib import Path
import numpy as np
import paddlehub as hub
import pytest
from jina import Document, DocumentArray, Executor
from text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
@pytest.fixture(scope='function')
def parameters():
return {'traverse_paths': ['r'], 'batch_size': 10}
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_text_paddle(model, document_array, content, parameters):
ex = TextPaddleEncoder()
assert ex.on_gpu is False
ex.encode(document_array, parameters)
for doc in document_array:
assert isinstance(doc.embedding, np.ndarray)
assert doc.embedding.shape == (1024,)
embeds = model.get_embedding([[content]])
pooled_features = []
for embed in embeds:
pooled_feature, _ = embed
pooled_features.append(pooled_feature)
assert (pooled_features == document_array[0].embedding).all()
|
from pathlib import Path
import numpy as np
import paddlehub as hub
import pytest
from jina import Document, DocumentArray, Executor
from ...text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
@pytest.fixture(scope='function')
def parameters():
return {'traverse_paths': ['r'], 'batch_size': 10}
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_text_paddle(model, document_array, content, parameters):
ex = TextPaddleEncoder()
assert ex.on_gpu is False
ex.encode(document_array, parameters)
for doc in document_array:
assert isinstance(doc.embedding, np.ndarray)
assert doc.embedding.shape == (1024,)
embeds = model.get_embedding([[content]])
pooled_features = []
for embed in embeds:
pooled_feature, _ = embed
pooled_features.append(pooled_feature)
assert (pooled_features == document_array[0].embedding).all()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.applications import convnext as convnext
from keras.applications import densenet as densenet
from keras.applications import efficientnet as efficientnet
from keras.applications import efficientnet_v2 as efficientnet_v2
from keras.applications import imagenet_utils as imagenet_utils
from keras.applications import inception_resnet_v2 as inception_resnet_v2
from keras.applications import inception_v3 as inception_v3
from keras.applications import mobilenet as mobilenet
from keras.applications import mobilenet_v2 as mobilenet_v2
from keras.applications import mobilenet_v3 as mobilenet_v3
from keras.applications import nasnet as nasnet
from keras.applications import resnet as resnet
from keras.applications import resnet50 as resnet50
from keras.applications import resnet_v2 as resnet_v2
from keras.applications import vgg16 as vgg16
from keras.applications import vgg19 as vgg19
from keras.applications import xception as xception
from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 as DenseNet201
from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0
from keras.src.applications.efficientnet import EfficientNetB1 as EfficientNetB1
from keras.src.applications.efficientnet import EfficientNetB2 as EfficientNetB2
from keras.src.applications.efficientnet import EfficientNetB3 as EfficientNetB3
from keras.src.applications.efficientnet import EfficientNetB4 as EfficientNetB4
from keras.src.applications.efficientnet import EfficientNetB5 as EfficientNetB5
from keras.src.applications.efficientnet import EfficientNetB6 as EfficientNetB6
from keras.src.applications.efficientnet import EfficientNetB7 as EfficientNetB7
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B0 as EfficientNetV2B0,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B1 as EfficientNetV2B1,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B2 as EfficientNetV2B2,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B3 as EfficientNetV2B3,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2L as EfficientNetV2L,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2M as EfficientNetV2M,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2S as EfficientNetV2S,
)
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3
from keras.src.applications.mobilenet import MobileNet as MobileNet
from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
from keras.src.applications.mobilenet_v3 import (
MobileNetV3Large as MobileNetV3Large,
)
from keras.src.applications.mobilenet_v3 import (
MobileNetV3Small as MobileNetV3Small,
)
from keras.src.applications.nasnet import NASNetLarge as NASNetLarge
from keras.src.applications.nasnet import NASNetMobile as NASNetMobile
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import ResNet101 as ResNet101
from keras.src.applications.resnet import ResNet152 as ResNet152
from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg19 import VGG19 as VGG19
from keras.src.applications.xception import Xception as Xception
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.applications import convnext
from keras.api.applications import densenet
from keras.api.applications import efficientnet
from keras.api.applications import efficientnet_v2
from keras.api.applications import imagenet_utils
from keras.api.applications import inception_resnet_v2
from keras.api.applications import inception_v3
from keras.api.applications import mobilenet
from keras.api.applications import mobilenet_v2
from keras.api.applications import mobilenet_v3
from keras.api.applications import nasnet
from keras.api.applications import resnet
from keras.api.applications import resnet50
from keras.api.applications import resnet_v2
from keras.api.applications import vgg16
from keras.api.applications import vgg19
from keras.api.applications import xception
from keras.src.applications.convnext import ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge
from keras.src.applications.densenet import DenseNet121
from keras.src.applications.densenet import DenseNet169
from keras.src.applications.densenet import DenseNet201
from keras.src.applications.efficientnet import EfficientNetB0
from keras.src.applications.efficientnet import EfficientNetB1
from keras.src.applications.efficientnet import EfficientNetB2
from keras.src.applications.efficientnet import EfficientNetB3
from keras.src.applications.efficientnet import EfficientNetB4
from keras.src.applications.efficientnet import EfficientNetB5
from keras.src.applications.efficientnet import EfficientNetB6
from keras.src.applications.efficientnet import EfficientNetB7
from keras.src.applications.efficientnet_v2 import EfficientNetV2B0
from keras.src.applications.efficientnet_v2 import EfficientNetV2B1
from keras.src.applications.efficientnet_v2 import EfficientNetV2B2
from keras.src.applications.efficientnet_v2 import EfficientNetV2B3
from keras.src.applications.efficientnet_v2 import EfficientNetV2L
from keras.src.applications.efficientnet_v2 import EfficientNetV2M
from keras.src.applications.efficientnet_v2 import EfficientNetV2S
from keras.src.applications.inception_resnet_v2 import InceptionResNetV2
from keras.src.applications.inception_v3 import InceptionV3
from keras.src.applications.mobilenet import MobileNet
from keras.src.applications.mobilenet_v2 import MobileNetV2
from keras.src.applications.mobilenet_v3 import MobileNetV3Large
from keras.src.applications.mobilenet_v3 import MobileNetV3Small
from keras.src.applications.nasnet import NASNetLarge
from keras.src.applications.nasnet import NASNetMobile
from keras.src.applications.resnet import ResNet50
from keras.src.applications.resnet import ResNet101
from keras.src.applications.resnet import ResNet152
from keras.src.applications.resnet_v2 import ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2
from keras.src.applications.vgg16 import VGG16
from keras.src.applications.vgg19 import VGG19
from keras.src.applications.xception import Xception
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
When distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.train_loop.dataloader, 'sampler') and hasattr(
runner.train_loop.dataloader.sampler, 'set_epoch'):
            # In case the `_SingleProcessDataLoaderIter` has no sampler,
            # or the data loader uses `SequentialSampler` in PyTorch.
runner.train_loop.dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.train_loop.dataloader,
'batch_sampler') and hasattr(
runner.train_loop.dataloader.batch_sampler.sampler,
'set_epoch'):
            # In case the `_SingleProcessDataLoaderIter` has no batch sampler.
            # The batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.train_loop.dataloader.batch_sampler.sampler.set_epoch(
runner.epoch)
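# A minimal, self-contained sketch (independent of mmengine) of what this hook
# automates: calling `set_epoch` on a DistributedSampler before each epoch so
# that the shuffle order differs between epochs. The tiny dataset and the
# explicit num_replicas/rank (which avoid needing a process group) are
# illustrative assumptions.
if __name__ == "__main__":
    import torch
    from torch.utils.data import TensorDataset
    from torch.utils.data.distributed import DistributedSampler

    dataset = TensorDataset(torch.arange(8))
    sampler = DistributedSampler(dataset, num_replicas=1, rank=0, shuffle=True)
    orders = []
    for epoch in range(2):
        sampler.set_epoch(epoch)  # what before_train_epoch does via the runner
        orders.append(list(iter(sampler)))
    print(orders)  # almost surely two different permutations of 0..7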
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
When distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.cur_dataloader.sampler, 'set_epoch'):
            # in case the data loader uses `SequentialSampler` in PyTorch
runner.cur_dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.cur_dataloader.batch_sampler.sampler, 'set_epoch'):
            # The batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.cur_dataloader.batch_sampler.sampler.set_epoch(runner.epoch)
|
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import EarlyStopException, early_stopping, log_evaluation, record_evaluation, reset_parameter
from .engine import CVBooster, cv, train
try:
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor
except ImportError:
pass
try:
from .plotting import create_tree_digraph, plot_importance, plot_metric, plot_split_value_histogram, plot_tree
except ImportError:
pass
try:
from .dask import DaskLGBMClassifier, DaskLGBMRanker, DaskLGBMRegressor
except ImportError:
pass
_version_path = Path(__file__).absolute().parent / "VERSION.txt"
if _version_path.is_file():
__version__ = _version_path.read_text(encoding="utf-8").strip()
__all__ = [
"Dataset",
"Booster",
"CVBooster",
"Sequence",
"register_logger",
"train",
"cv",
"LGBMModel",
"LGBMRegressor",
"LGBMClassifier",
"LGBMRanker",
"DaskLGBMRegressor",
"DaskLGBMClassifier",
"DaskLGBMRanker",
"log_evaluation",
"record_evaluation",
"reset_parameter",
"early_stopping",
"EarlyStopException",
"plot_importance",
"plot_split_value_histogram",
"plot_metric",
"plot_tree",
"create_tree_digraph",
]
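# A minimal usage sketch of the core API exported above; the random regression
# data and the parameter values are illustrative assumptions, not part of the
# package.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.random((100, 5))
    y = rng.random(100)
    train_set = Dataset(X, label=y)
    booster = train({"objective": "regression", "verbosity": -1}, train_set, num_boost_round=5)
    preds = booster.predict(X)
    assert preds.shape == (100,)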
|
# coding: utf-8
"""LightGBM, Light Gradient Boosting Machine.
Contributors: https://github.com/microsoft/LightGBM/graphs/contributors.
"""
from pathlib import Path
from .basic import Booster, Dataset, Sequence, register_logger
from .callback import EarlyStopException, early_stopping, log_evaluation, record_evaluation, reset_parameter
from .engine import CVBooster, cv, train
try:
from .sklearn import LGBMClassifier, LGBMModel, LGBMRanker, LGBMRegressor
except ImportError:
pass
try:
from .plotting import create_tree_digraph, plot_importance, plot_metric, plot_split_value_histogram, plot_tree
except ImportError:
pass
try:
from .dask import DaskLGBMClassifier, DaskLGBMRanker, DaskLGBMRegressor
except ImportError:
pass
_version_path = Path(__file__).absolute().parent / "VERSION.txt"
if _version_path.is_file():
__version__ = _version_path.read_text(encoding="utf-8").strip()
__all__ = [
"Dataset",
"Booster",
"CVBooster",
"Sequence",
"register_logger",
"train",
"cv",
"LGBMModel",
"LGBMRegressor",
"LGBMClassifier",
"LGBMRanker",
"DaskLGBMRegressor",
"DaskLGBMClassifier",
"DaskLGBMRanker",
"log_evaluation",
"record_evaluation",
"reset_parameter",
"early_stopping",
"EarlyStopException",
"plot_importance",
"plot_split_value_histogram",
"plot_metric",
"plot_tree",
"create_tree_digraph",
]
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an accuracy over all possible source sentences src_i, and likewise the accuracy for the other direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool of sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
As these have development sets.
Usage:
python examples/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
import logging
import sys
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, evaluation
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
    if list(dataset.keys()) == ["train"]:
        num_samples = min(5000, len(dataset["train"]))
        datasets[f"train[:{num_samples}]"] = dataset["train"].select(range(num_samples))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an accuracy over all possible source sentences src_i, and likewise the accuracy for the other direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool of sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
As these have development sets.
Usage:
python examples/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
from sentence_transformers import SentenceTransformer, evaluation
import sys
import logging
from datasets import load_dataset
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
    if list(dataset.keys()) == ["train"]:
        num_samples = min(5000, len(dataset["train"]))
        datasets[f"train[:{num_samples}]"] = dataset["train"].select(range(num_samples))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
|
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
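# A minimal sketch of supplying a custom `main_score_function`; the constant
# dummy evaluators and the mean aggregation are illustrative assumptions.
if __name__ == "__main__":
    class _ConstantEvaluator(SentenceEvaluator):
        def __init__(self, score: float):
            super().__init__()
            self.score = score

        def __call__(self, model, output_path=None, epoch=-1, steps=-1):
            return self.score

    seq_evaluator = SequentialEvaluator(
        [_ConstantEvaluator(0.2), _ConstantEvaluator(0.8)],
        main_score_function=lambda scores: sum(scores) / len(scores),
    )
    results = seq_evaluator(model=None)  # the dummy evaluators ignore the model
    assert results["evaluator_0"] == 0.2
    assert results["sequential_score"] == 0.5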
|
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous score 0...1 indicating the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous score 0...1 indicating the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
__all__ = ['DropBlock']
|
from .dropblock import DropBlock
__all__ = ['DropBlock']
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet import ResNet50
from keras.src.applications.resnet import decode_predictions
from keras.src.applications.resnet import preprocess_input
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.host = self.runtime_args.host
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CometCallbackHandler": "langchain_community.callbacks.comet_ml_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CometCallbackHandler",
]
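# A minimal, generic sketch (not langchain code) of the PEP 562 mechanism this
# module relies on: when an attribute is missing from a module's namespace,
# Python falls back to the module-level `__getattr__`, which lets the importer
# warn before returning the real object. The `OrderedDict` target below is an
# illustrative stand-in for the deprecated lookup table above.
if __name__ == "__main__":
    import types
    import warnings

    demo = types.ModuleType("demo")

    def _demo_getattr(name: str):
        if name == "OrderedDict":
            warnings.warn("demo.OrderedDict is deprecated; import it from collections", DeprecationWarning)
            from collections import OrderedDict

            return OrderedDict
        raise AttributeError(name)

    demo.__getattr__ = _demo_getattr
    from collections import OrderedDict

    assert demo.OrderedDict is OrderedDict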
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.comet_ml_callback import CometCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CometCallbackHandler": "langchain_community.callbacks.comet_ml_callback"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CometCallbackHandler",
]
|
import os
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.tools import tool
from pydantic import BaseModel
from langchain_community.chat_models import MiniMaxChat
def test_chat_minimax_not_group_id() -> None:
if "MINIMAX_GROUP_ID" in os.environ:
del os.environ["MINIMAX_GROUP_ID"]
chat = MiniMaxChat() # type: ignore[call-arg]
response = chat.invoke("你好呀")
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_chat_minimax_with_stream() -> None:
chat = MiniMaxChat() # type: ignore[call-arg]
for chunk in chat.stream("你好呀"):
assert isinstance(chunk, AIMessage)
assert isinstance(chunk.content, str)
@tool
def add(a: int, b: int) -> int:
"""Adds a and b."""
return a + b
@tool
def multiply(a: int, b: int) -> int:
"""Multiplies a and b."""
return a * b
def test_chat_minimax_with_tool() -> None:
"""Test MinimaxChat with bind tools."""
chat = MiniMaxChat() # type: ignore[call-arg]
tools = [add, multiply]
chat_with_tools = chat.bind_tools(tools)
query = "What is 3 * 12?"
messages = [HumanMessage(query)]
ai_msg = chat_with_tools.invoke(messages)
assert isinstance(ai_msg, AIMessage)
assert isinstance(ai_msg.tool_calls, list)
assert len(ai_msg.tool_calls) == 1
tool_call = ai_msg.tool_calls[0]
assert "args" in tool_call
messages.append(ai_msg) # type: ignore[arg-type]
for tool_call in ai_msg.tool_calls:
selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
tool_output = selected_tool.invoke(tool_call["args"]) # type: ignore[attr-defined]
messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"])) # type: ignore[arg-type]
response = chat_with_tools.invoke(messages)
assert isinstance(response, AIMessage)
class AnswerWithJustification(BaseModel):
"""An answer to the user question along with justification for the answer."""
answer: str
justification: str
def test_chat_minimax_with_structured_output() -> None:
"""Test MiniMaxChat with structured output."""
llm = MiniMaxChat() # type: ignore[call-arg]
structured_llm = llm.with_structured_output(AnswerWithJustification)
response = structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
assert isinstance(response, AnswerWithJustification)
def test_chat_minimax_with_structured_output_include_raw() -> None:
    """Test MiniMaxChat with structured output and include_raw."""
llm = MiniMaxChat() # type: ignore[call-arg]
structured_llm = llm.with_structured_output(
AnswerWithJustification, include_raw=True
)
response = structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
assert isinstance(response, dict)
assert isinstance(response.get("raw"), AIMessage)
assert isinstance(response.get("parsed"), AnswerWithJustification)
|
import os
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.tools import tool
from pydantic import BaseModel
from langchain_community.chat_models import MiniMaxChat
def test_chat_minimax_not_group_id() -> None:
if "MINIMAX_GROUP_ID" in os.environ:
del os.environ["MINIMAX_GROUP_ID"]
chat = MiniMaxChat() # type: ignore[call-arg]
response = chat.invoke("你好呀")
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_chat_minimax_with_stream() -> None:
chat = MiniMaxChat() # type: ignore[call-arg]
for chunk in chat.stream("你好呀"):
assert isinstance(chunk, AIMessage)
assert isinstance(chunk.content, str)
@tool
def add(a: int, b: int) -> int:
"""Adds a and b."""
return a + b
@tool
def multiply(a: int, b: int) -> int:
"""Multiplies a and b."""
return a * b
def test_chat_minimax_with_tool() -> None:
"""Test MinimaxChat with bind tools."""
chat = MiniMaxChat() # type: ignore[call-arg]
tools = [add, multiply]
chat_with_tools = chat.bind_tools(tools)
query = "What is 3 * 12?"
messages = [HumanMessage(query)]
ai_msg = chat_with_tools.invoke(messages)
assert isinstance(ai_msg, AIMessage)
assert isinstance(ai_msg.tool_calls, list)
assert len(ai_msg.tool_calls) == 1
tool_call = ai_msg.tool_calls[0]
assert "args" in tool_call
messages.append(ai_msg) # type: ignore[arg-type]
for tool_call in ai_msg.tool_calls:
selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
tool_output = selected_tool.invoke(tool_call["args"]) # type: ignore[attr-defined]
messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"])) # type: ignore[arg-type]
response = chat_with_tools.invoke(messages)
assert isinstance(response, AIMessage)
class AnswerWithJustification(BaseModel):
"""An answer to the user question along with justification for the answer."""
answer: str
justification: str
def test_chat_minimax_with_structured_output() -> None:
"""Test MiniMaxChat with structured output."""
llm = MiniMaxChat() # type: ignore
structured_llm = llm.with_structured_output(AnswerWithJustification)
response = structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
assert isinstance(response, AnswerWithJustification)
def test_chat_minimax_with_structured_output_include_raw() -> None:
    """Test MiniMaxChat with structured output and include_raw."""
llm = MiniMaxChat() # type: ignore
structured_llm = llm.with_structured_output(
AnswerWithJustification, include_raw=True
)
response = structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
assert isinstance(response, dict)
assert isinstance(response.get("raw"), AIMessage)
assert isinstance(response.get("parsed"), AnswerWithJustification)
|
_base_ = './detic_centernet2_r50_fpn_4x_lvis_boxsup.py'
dataset_type = ['LVISV1Dataset', 'ImageNetLVISV1Dataset']
image_size_det = (640, 640)
image_size_cls = (320, 320)
# backend = 'pillow'
backend_args = None
train_pipeline_det = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size_det,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size_det,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_pipeline_cls = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=False, with_label=True),
dict(
type='RandomResize',
scale=image_size_cls,
ratio_range=(0.5, 1.5),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size_cls,
recompute_bbox=False,
bbox_clip_border=False,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
# 'lvis_v1_train_norare.json' contains the lvis_v1 annotations
# with the labels of the 337 rare classes removed
dataset_det = dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type='LVISV1Dataset',
data_root='data/lvis/',
ann_file='annotations/lvis_v1_train_norare.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline_det,
backend_args=backend_args))
dataset_cls = dict(
type='ImageNetLVISV1Dataset',
data_root='data/imagenet',
ann_file='annotations/imagenet_lvis_image_info.json',
data_prefix=dict(img='ImageNet-LVIS/'),
pipeline=train_pipeline_cls,
backend_args=backend_args)
train_dataloader = dict(
_delete_=True,
batch_size=[8, 32],
num_workers=2,
persistent_workers=True,
sampler=dict(type='MultiDataSampler', dataset_ratio=[1, 4]),
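    # dataset_ratio=[1, 4] weights sampling from the two datasets at 1:4
    # (detection : classification), consistent with the per-dataset
    # batch sizes [8, 32] above.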
batch_sampler=dict(
type='MultiDataAspectRatioBatchSampler', num_datasets=2),
dataset=dict(type='ConcatDataset', datasets=[dataset_det, dataset_cls]))
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='CosineAnnealingLR',
begin=0,
by_epoch=False,
T_max=90000,
)
]
load_from = './first_stage/detic_centernet2_r50_fpn_4x_lvis-base_boxsup.pth'
find_unused_parameters = True
|
_base_ = './detic_centernet2_r50_fpn_4x_lvis_boxsup.py'
image_size_det = (640, 640)
image_size_cls = (320, 320)
# backend = 'pillow'
backend_args = None
train_pipeline_det = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size_det,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size_det,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_pipeline_cls = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=False, with_label=True),
dict(
type='RandomResize',
scale=image_size_cls,
ratio_range=(0.5, 1.5),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size_cls,
recompute_bbox=False,
bbox_clip_border=False,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
# 'lvis_v1_train_norare.json' contains the lvis_v1 annotations
# with the labels of the 337 rare classes removed
dataset_det = dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type='LVISV1Dataset',
data_root='data/lvis/',
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline_det,
backend_args=backend_args))
dataset_cls = dict(
type='ImageNetLVISV1Dataset',
data_root='data/imagenet',
ann_file='annotations/imagenet_lvis_image_info.json',
data_prefix=dict(img='ImageNet-LVIS/'),
pipeline=train_pipeline_cls,
backend_args=backend_args)
train_dataloader = dict(
_delete_=True,
batch_size=[8, 32],
num_workers=2,
persistent_workers=True,
sampler=dict(type='MultiDataSampler', dataset_ratio=[1, 4]),
batch_sampler=dict(
type='MultiDataAspectRatioBatchSampler', num_datasets=2),
dataset=dict(type='ConcatDataset', datasets=[dataset_det, dataset_cls]))
load_from = './first_stage/detic_centernet2_r50_fpn_4x_lvis-base_boxsup.pth'
find_unused_parameters = True
|
_base_ = 'deformable-detr_refine_r50_16xb2-50e_coco.py'
model = dict(bbox_head=dict(as_two_stage=True))
|
_base_ = 'deformable_detr_refine_r50_16x2_50e_coco.py'
model = dict(bbox_head=dict(as_two_stage=True))
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
    Bytes that store audio data and that can be loaded into an audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[np.ndarray, int]:
"""
Load the Audio from the bytes into a numpy.ndarray Audio tensor
EXAMPLE USAGE
.. code-block:: python
            from typing import Optional
            import numpy as np
            from docarray import BaseDoc
            from docarray.typing import AudioUrl, NdArray
            class MyAudio(BaseDoc):
                url: AudioUrl
                tensor: Optional[NdArray]
                bytes: Optional[bytes]
            doc = MyAudio(url="toydata/hello.wav")
            doc.bytes = doc.url.load_bytes()
            doc.tensor, doc.frame_rate = doc.bytes.load()
            # Note this is equivalent to doing
            doc.tensor, doc.frame_rate = doc.url.load()
            assert isinstance(doc.tensor, np.ndarray)
        :return: tuple of an np.ndarray of audio samples and the integer frame rate
        """
from pydub import AudioSegment # type: ignore
segment = AudioSegment.from_file(io.BytesIO(self))
        # Extract the raw integer samples into a NumPy array
        samples = np.array(segment.get_array_of_samples())
        # Normalise by the integer sample range so that values lie between -1.0 and +1.0
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return samples_norm, segment.frame_rate
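# Worked example of the normalisation in `load` (illustrative numbers, not
# taken from this file): for 16-bit PCM audio, `sample_width` is 2, so the
# divisor is 2 ** (2 * 8 - 1) == 32768, which maps the int16 sample range
# [-32768, 32767] onto roughly [-1.0, 1.0).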
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
    Bytes that store audio data and that can be loaded into an audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[np.ndarray, int]:
"""
Load the Audio from the bytes into a numpy.ndarray Audio tensor
EXAMPLE USAGE
.. code-block:: python
            from typing import Optional
            import numpy as np
            from docarray import BaseDocument
            from docarray.typing import AudioUrl, NdArray
            class MyAudio(BaseDocument):
                url: AudioUrl
                tensor: Optional[NdArray]
                bytes: Optional[bytes]
            doc = MyAudio(url="toydata/hello.wav")
            doc.bytes = doc.url.load_bytes()
            doc.tensor, doc.frame_rate = doc.bytes.load()
            # Note this is equivalent to doing
            doc.tensor, doc.frame_rate = doc.url.load()
            assert isinstance(doc.tensor, np.ndarray)
        :return: tuple of an np.ndarray of audio samples and the integer frame rate
        """
from pydub import AudioSegment # type: ignore
segment = AudioSegment.from_file(io.BytesIO(self))
        # Convert the raw samples to a NumPy array (still integer-valued here)
        samples = np.array(segment.get_array_of_samples())
        # Normalise to floats in [-1.0, +1.0] by dividing by the maximum sample magnitude
        samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return samples_norm, segment.frame_rate
|
"""
This is an example of how to train SentenceTransformers in a multi-task setup.
The system trains BERT on the AllNLI and on the STSbenchmark dataset.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import gzip
import csv
import os
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
model_name = "bert-base-uncased"
batch_size = 16
model_save_path = "output/training_multi-task_" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Use BERT for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_nli_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
train_nli_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_dataloader_nli = DataLoader(train_nli_samples, shuffle=True, batch_size=batch_size)
train_loss_nli = losses.SoftmaxLoss(
model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int)
)
logging.info("Read STSbenchmark train dataset")
train_sts_samples = []
dev_sts_samples = []
test_sts_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_sts_samples.append(inp_example)
elif row["split"] == "test":
test_sts_samples.append(inp_example)
else:
train_sts_samples.append(inp_example)
train_dataloader_sts = DataLoader(train_sts_samples, shuffle=True, batch_size=batch_size)
train_loss_sts = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_sts_samples, name="sts-dev")
# Configure the training
num_epochs = 4
warmup_steps = math.ceil(len(train_dataloader_sts) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Here we define the two train objectives: train_dataloader_nli with train_loss_nli (i.e., SoftmaxLoss for NLI data)
# and train_dataloader_sts with train_loss_sts (i.e., CosineSimilarityLoss for STSbenchmark data)
# You can pass as many (dataloader, loss) tuples as you like. They are iterated in a round-robin way.
train_objectives = [(train_dataloader_nli, train_loss_nli), (train_dataloader_sts, train_loss_sts)]
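# For illustration, with two objectives the round-robin schedule looks like:
#   step 1: batch from train_dataloader_nli -> train_loss_nli
#   step 2: batch from train_dataloader_sts -> train_loss_sts
#   step 3: batch from train_dataloader_nli -> train_loss_nli, and so on.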
# Train the model
model.fit(
train_objectives=train_objectives,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_sts_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
|
"""
This is an example of how to train SentenceTransformers in a multi-task setup.
The system trains BERT on the AllNLI and on the STSbenchmark dataset.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import gzip
import csv
import os
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
model_name = "bert-base-uncased"
batch_size = 16
model_save_path = "output/training_multi-task_" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Use BERT for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_nli_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
train_nli_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_dataloader_nli = DataLoader(train_nli_samples, shuffle=True, batch_size=batch_size)
train_loss_nli = losses.SoftmaxLoss(
model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int)
)
logging.info("Read STSbenchmark train dataset")
train_sts_samples = []
dev_sts_samples = []
test_sts_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_sts_samples.append(inp_example)
elif row["split"] == "test":
test_sts_samples.append(inp_example)
else:
train_sts_samples.append(inp_example)
train_dataloader_sts = DataLoader(train_sts_samples, shuffle=True, batch_size=batch_size)
train_loss_sts = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_sts_samples, name="sts-dev")
# Configure the training
num_epochs = 4
warmup_steps = math.ceil(len(train_dataloader_sts) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Here we define the two train objectives: train_dataloader_nli with train_loss_nli (i.e., SoftmaxLoss for NLI data)
# and train_dataloader_sts with train_loss_sts (i.e., CosineSimilarityLoss for STSbenchmark data)
# You can pass as many (dataloader, loss) tuples as you like. They are iterated in a round-robin way.
train_objectives = [(train_dataloader_nli, train_loss_nli), (train_dataloader_sts, train_loss_sts)]
# Train the model
model.fit(
train_objectives=train_objectives,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_sts_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
|
from docarray.typing.id import ID
from docarray.typing.tensor import NdArray, Tensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl, TextUrl
__all__ = [
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl',
'AnyUrl',
'ID',
'Tensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor'])
|
from docarray.typing.id import ID
from docarray.typing.tensor import NdArray, Tensor, TorchEmbedding, TorchTensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl, TextUrl
__all__ = [
'TorchTensor',
'NdArray',
'Embedding',
'ImageUrl',
'TextUrl',
'AnyUrl',
'ID',
'Tensor',
'TorchEmbedding',
]
|
import posixpath
from pathlib import Path
from unittest.mock import patch
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
from fsspec.registry import _registry as _fsspec_registry
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
else:
return [name[len(self.local_root_dir) :] for name in out]
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :]
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
class TmpDirFileSystem(MockFileSystem):
protocol = "tmp"
tmp_dir = None
def __init__(self, *args, **kwargs):
assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("tmp://"):
path = path[6:]
return path
@pytest.fixture
def mock_fsspec():
_fsspec_registry["mock"] = MockFileSystem
_fsspec_registry["tmp"] = TmpDirFileSystem
yield
del _fsspec_registry["mock"]
del _fsspec_registry["tmp"]
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
yield TmpDirFileSystem()
TmpDirFileSystem.clear_instance_cache()
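
# A minimal sketch of how these fixtures might be used in a test (the test
# function below is illustrative, not part of this module):
#
#     def test_mockfs_write_and_list(mockfs):
#         with mockfs.open("mock://data/file.txt", "w") as f:
#             f.write("hello")
#         assert "data/file.txt" in mockfs.ls("data", detail=False)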
|
import posixpath
from pathlib import Path
from unittest.mock import patch
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
from fsspec.registry import _registry as _fsspec_registry
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
else:
return [name[len(self.local_root_dir) :] for name in out]
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :]
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
class TmpDirFileSystem(MockFileSystem):
protocol = "tmp"
tmp_dir = None
def __init__(self, *args, **kwargs):
assert self.tmp_dir is not None, "TmpDirFileSystem.tmp_dir is not set"
super().__init__(*args, **kwargs, local_root_dir=self.tmp_dir, auto_mkdir=True)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("tmp://"):
path = path[6:]
return path
@pytest.fixture
def mock_fsspec():
_fsspec_registry["mock"] = MockFileSystem
_fsspec_registry["tmp"] = TmpDirFileSystem
yield
del _fsspec_registry["mock"]
del _fsspec_registry["tmp"]
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
@pytest.fixture
def tmpfs(tmp_path_factory, mock_fsspec):
tmp_fs_dir = tmp_path_factory.mktemp("tmpfs")
with patch.object(TmpDirFileSystem, "tmp_dir", tmp_fs_dir):
yield TmpDirFileSystem()
|
import unittest
import torch
from mmengine.data import PixelData
from mmengine.testing import assert_allclose
from mmdet.data_elements import DetDataSample
from mmdet.models.seg_heads import PanopticFPNHead
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=1,
inner_channels=1)
head.init_weights()
assert_allclose(head.conv_logits.bias.data,
torch.zeros_like(head.conv_logits.bias.data))
def test_loss(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
data_sample1 = DetDataSample()
data_sample1.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
data_sample2 = DetDataSample()
data_sample2.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
batch_data_samples = [data_sample1, data_sample2]
results = head.loss(x, batch_data_samples)
self.assertIsInstance(results, dict)
def test_predict(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
img_meta1 = {
'batch_input_shape': (16, 16),
'img_shape': (14, 14),
'ori_shape': (12, 12),
}
img_meta2 = {
'batch_input_shape': (16, 16),
'img_shape': (16, 16),
'ori_shape': (16, 16),
}
batch_img_metas = [img_meta1, img_meta2]
head.eval()
with torch.no_grad():
seg_preds = head.predict(x, batch_img_metas, rescale=False)
self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
seg_preds = head.predict(x, batch_img_metas, rescale=True)
self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
|
import unittest
import torch
from mmengine.data import PixelData
from mmengine.testing import assert_allclose
from mmdet.core.data_structures import DetDataSample
from mmdet.models.seg_heads import PanopticFPNHead
class TestPanopticFPNHead(unittest.TestCase):
def test_init_weights(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=1,
inner_channels=1)
head.init_weights()
assert_allclose(head.conv_logits.bias.data,
torch.zeros_like(head.conv_logits.bias.data))
def test_loss(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
data_sample1 = DetDataSample()
data_sample1.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
data_sample2 = DetDataSample()
data_sample2.gt_sem_seg = PixelData(
sem_seg=torch.randint(0, 4, (1, 7, 8)))
batch_data_samples = [data_sample1, data_sample2]
results = head.loss(x, batch_data_samples)
self.assertIsInstance(results, dict)
def test_predict(self):
head = PanopticFPNHead(
num_things_classes=2,
num_stuff_classes=2,
in_channels=32,
inner_channels=32,
start_level=0,
end_level=1)
x = [torch.rand((2, 32, 8, 8)), torch.rand((2, 32, 4, 4))]
img_meta1 = {
'batch_input_shape': (16, 16),
'img_shape': (14, 14),
'ori_shape': (12, 12),
}
img_meta2 = {
'batch_input_shape': (16, 16),
'img_shape': (16, 16),
'ori_shape': (16, 16),
}
batch_img_metas = [img_meta1, img_meta2]
head.eval()
with torch.no_grad():
seg_preds = head.predict(x, batch_img_metas, rescale=False)
self.assertTupleEqual(seg_preds[0].shape[-2:], (16, 16))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
seg_preds = head.predict(x, batch_img_metas, rescale=True)
self.assertTupleEqual(seg_preds[0].shape[-2:], (12, 12))
self.assertTupleEqual(seg_preds[1].shape[-2:], (16, 16))
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
                # which means w, h should be computed as `x2 - x1 + 1` and
# `y2 - y1 + 1`
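                # For example, a box with x1=0, y1=0, x2=9, y2=9 has
                # width 10 and height 10 (100 pixels) under this legacy
                # convention, rather than 9 x 9.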
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
eval_results.move_to_end('mAP', last=False)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
                # which means w, h should be computed as `x2 - x1 + 1` and
# `y2 - y1 + 1`
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
register_all_modules()
# build the model from a config file and a checkpoint file
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
while True:
ret_val, img = camera.read()
result = inference_detector(model, img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
name='result',
image=img,
data_sample=result,
draw_gt=False,
pred_score_thr=args.score_thr,
show=False)
img = visualizer.get_image()
img = mmcv.imconvert(img, 'bgr', 'rgb')
cv2.imshow('result', img)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
if __name__ == '__main__':
main()
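
# Example invocation (the script name, config and checkpoint paths are
# illustrative):
#   python webcam_demo.py configs/yolox/yolox_s_8x8_300e_coco.py yolox_s.pth --camera-id 0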
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
register_all_modules()
# build the model from a config file and a checkpoint file
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
while True:
ret_val, img = camera.read()
result = inference_detector(model, img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
name='result',
image=img,
pred_sample=result,
pred_score_thr=args.score_thr,
show=False)
img = visualizer.get_image()
img = mmcv.imconvert(img, 'bgr', 'rgb')
cv2.imshow('result', img)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
if __name__ == '__main__':
main()
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/adaptive_layer_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) needs two text
# columns and one similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, inner_train_loss)
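# AdaptiveLayerLoss wraps the inner loss so that, during training, the loss is
# also computed on embeddings produced by truncated copies of the model (with
# some of the final transformer layers removed), which is what lets the model
# stay useful when layers are dropped at inference time.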
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="adaptive-layer-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-adaptive-layer')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/adaptive_layer_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) needs two text
# columns and one similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, inner_train_loss)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="adaptive-layer-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-adaptive-layer')`."
)
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then k-means clustering is applied.
"""
from sklearn.cluster import KMeans
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
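# For all-MiniLM-L6-v2, corpus_embeddings is a NumPy array of shape
# (len(corpus), 384), since the model produces 384-dimensional embeddings.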
# Perform k-means clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in enumerate(clustered_sentences):
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then k-means clustering is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
embedder = SentenceTransformer('all-MiniLM-L6-v2')
# Corpus with example sentences
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'A man is eating pasta.',
'The girl is carrying a baby.',
'The baby is carried by the woman',
'A man is riding a horse.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'Someone in a gorilla costume is playing a set of drums.',
'A cheetah is running behind its prey.',
'A cheetah chases prey on across a field.'
]
corpus_embeddings = embedder.encode(corpus)
# Perform k-means clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in enumerate(clustered_sentences):
print("Cluster ", i+1)
print(cluster)
print("")
|
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_graph(sys.getrecursionlimit, can_constant_fold_through=True)
def getrecursionlimit() -> int:
return sys.getrecursionlimit()
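
# These substitutions let torch.compile trace through the corresponding
# C-level builtins by executing the pure-Python equivalents above instead;
# `can_constant_fold_through=True` additionally allows the call to be folded
# into a constant when its inputs are constants.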
|
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
    "intern",
    "getrecursionlimit",
    "get_int_max_str_digits",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_graph(sys.getrecursionlimit, can_constant_fold_through=True)
def getrecursionlimit() -> int:
return sys.getrecursionlimit()
@substitute_in_graph(sys.get_int_max_str_digits, can_constant_fold_through=True)
def get_int_max_str_digits() -> int:
return sys.get_int_max_str_digits()
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import SchedulerMixin, UNet2DModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
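
# A minimal usage sketch (assumes `unet` and `scheduler` are already
# constructed, e.g. a UNet2DModel and a DDPMScheduler):
#
#     pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#     output, message = pipeline(batch_size=1, num_inference_steps=50)
#     output.images[0].save("sample.png")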
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
|
import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_name_tag_map():
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
#'test-executor-torch': '0.13.1',
'executor-merger': '0.1.1',
'custom-gateway': '0.1.1',
'multiprotocol-gateway': '0.1.1',
'jinaai/jina': 'test-pip',
}
def build_docker_image(image_name, image_name_tag_map):
logger = JinaLogger('docker-compose-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version():
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
@pytest.fixture(autouse=True)
def build_images(image_name_tag_map):
for image in image_name_tag_map.keys():
if image != 'jinaai/jina':
build_docker_image(image, image_name_tag_map)
@pytest.fixture
def docker_images(request, image_name_tag_map):
image_names = request.param
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
class DockerComposeServices:
healthy_status = 'healthy'
unhealthy_status = 'unhealthy'
def __init__(self, dump_path, timeout_second=30):
self.dump_path = dump_path
self.timeout_second = timeout_second
def __enter__(self):
subprocess.run(
f'docker compose -f {self.dump_path} up --build -d --remove-orphans'.split(
' '
)
)
container_ids = (
subprocess.run(
f'docker compose -f {self.dump_path} ps -q'.split(' '),
capture_output=True,
)
.stdout.decode("utf-8")
.split('\n')
)
container_ids.remove('') # remove empty return line
if not container_ids:
            raise RuntimeError('docker compose ps did not detect any launched container')
client = docker.from_env()
init_time = time.time()
healthy = False
while time.time() - init_time < self.timeout_second:
if self._are_all_container_healthy(container_ids, client):
healthy = True
break
time.sleep(0.1)
if not healthy:
raise RuntimeError('Docker containers are not healthy')
@staticmethod
def _are_all_container_healthy(
container_ids: List[str], client: docker.client.DockerClient
) -> bool:
for id_ in container_ids:
status = client.containers.get(id_).attrs['State']['Health']['Status']
if status != DockerComposeServices.healthy_status:
return False
return True
def __exit__(self, exc_type, exc_val, exc_tb):
subprocess.run(
f'docker compose -f {self.dump_path} down --remove-orphans'.split(' ')
)
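
# A minimal usage sketch (the compose file path is illustrative):
#
#     with DockerComposeServices('docker-compose.yml', timeout_second=60):
#         ...  # all services are up and report a healthy status here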
|
import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_name_tag_map():
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
'test-executor-torch': '0.13.1',
'executor-merger': '0.1.1',
'custom-gateway': '0.1.1',
'multiprotocol-gateway': '0.1.1',
'jinaai/jina': 'test-pip',
}
def build_docker_image(image_name, image_name_tag_map):
logger = JinaLogger('docker-compose-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version():
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
@pytest.fixture(autouse=True)
def build_images(image_name_tag_map):
for image in image_name_tag_map.keys():
if image != 'jinaai/jina':
build_docker_image(image, image_name_tag_map)
@pytest.fixture
def docker_images(request, image_name_tag_map):
image_names = request.param
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
class DockerComposeServices:
healthy_status = 'healthy'
unhealthy_status = 'unhealthy'
def __init__(self, dump_path, timeout_second=30):
self.dump_path = dump_path
self.timeout_second = timeout_second
def __enter__(self):
subprocess.run(
f'docker compose -f {self.dump_path} up --build -d --remove-orphans'.split(
' '
)
)
container_ids = (
subprocess.run(
f'docker compose -f {self.dump_path} ps -q'.split(' '),
capture_output=True,
)
.stdout.decode("utf-8")
.split('\n')
)
container_ids.remove('') # remove empty return line
if not container_ids:
            raise RuntimeError('docker compose ps did not detect any launched container')
client = docker.from_env()
init_time = time.time()
healthy = False
while time.time() - init_time < self.timeout_second:
if self._are_all_container_healthy(container_ids, client):
healthy = True
break
time.sleep(0.1)
if not healthy:
raise RuntimeError('Docker containers are not healthy')
@staticmethod
def _are_all_container_healthy(
container_ids: List[str], client: docker.client.DockerClient
) -> bool:
for id_ in container_ids:
status = client.containers.get(id_).attrs['State']['Health']['Status']
if status != DockerComposeServices.healthy_status:
return False
return True
def __exit__(self, exc_type, exc_val, exc_tb):
subprocess.run(
f'docker compose -f {self.dump_path} down --remove-orphans'.split(' ')
)
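# Hedged usage sketch (not part of the original fixtures): how the
# DockerComposeServices context manager above is meant to be used in a test.
# The compose file path below is a hypothetical example.
def _example_compose_usage(compose_path: str = 'docker-compose.yml') -> None:
    with DockerComposeServices(compose_path, timeout_second=60):
        # All containers reported 'healthy' at this point; run test logic here.
        pass
    # On exit, `docker compose ... down --remove-orphans` has been run.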
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Llama4 model."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
require_read_token,
require_torch_large_accelerator,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import (
Llama4ForConditionalGeneration,
Llama4Processor,
)
@slow
@require_torch_large_accelerator
@require_read_token
class Llama4IntegrationTest(unittest.TestCase):
model_id = "meta-llama/Llama-4-Scout-17B-16E"
@classmethod
def setUpClass(cls):
cls.model = Llama4ForConditionalGeneration.from_pretrained(
"meta-llama/Llama-4-Scout-17B-16E",
device_map="auto",
torch_dtype=torch.float32,
attn_implementation="eager",
)
def setUp(self):
self.processor = Llama4Processor.from_pretrained("meta-llama/Llama-4-Scout-17B-16E", padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
self.messages_1 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{"type": "image", "url": url},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
self.messages_2 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
},
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
{"type": "text", "text": "Are these images identical?"},
],
},
]
def test_model_17b_16e_fp16(self):
EXPECTED_TEXT = [
'system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach, with a blue sky and a body of water in the background. The cow is brown with a white'
] # fmt: skip
inputs = self.processor.apply_chat_template(
self.messages_1, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True
).to(device=torch_device, dtype=self.model.dtype)
output = self.model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
print(output_text)
self.assertEqual(output_text, EXPECTED_TEXT)
def test_model_17b_16e_batch(self):
inputs = self.processor.apply_chat_template(
[self.messages_1, self.messages_2],
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
add_generation_prompt=True,
).to(device=torch_device, dtype=torch.float32)
output = self.model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = [
'system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach, with a blue sky and a body of water in the background. The cow is brown with a white',
'system\n\nYou are a helpful assistant.user\n\nAre these images identical?assistant\n\nNo, these images are not identical. The first image shows a cow standing on a beach with a blue sky and a white cloud in the background.'
] # fmt: skip
self.assertEqual(output_text, EXPECTED_TEXTS)
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Llama4 model."""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
require_read_token,
require_torch_large_gpu,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import (
Llama4ForConditionalGeneration,
Llama4Processor,
)
@slow
@require_torch_large_gpu
@require_read_token
class Llama4IntegrationTest(unittest.TestCase):
model_id = "meta-llama/Llama-4-Scout-17B-16E"
@classmethod
def setUpClass(cls):
cls.model = Llama4ForConditionalGeneration.from_pretrained(
"meta-llama/Llama-4-Scout-17B-16E",
device_map="auto",
torch_dtype=torch.float32,
attn_implementation="eager",
)
def setUp(self):
self.processor = Llama4Processor.from_pretrained("meta-llama/Llama-4-Scout-17B-16E", padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
self.messages_1 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{"type": "image", "url": url},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
self.messages_2 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
},
{"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
{"type": "text", "text": "Are these images identical?"},
],
},
]
def test_model_17b_16e_fp16(self):
EXPECTED_TEXT = [
'system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach, with a blue sky and a body of water in the background. The cow is brown with a white'
] # fmt: skip
inputs = self.processor.apply_chat_template(
self.messages_1, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True
).to(device=torch_device, dtype=self.model.dtype)
output = self.model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
print(output_text)
self.assertEqual(output_text, EXPECTED_TEXT)
def test_model_17b_16e_batch(self):
inputs = self.processor.apply_chat_template(
[self.messages_1, self.messages_2],
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
add_generation_prompt=True,
).to(device=torch_device, dtype=torch.float32)
output = self.model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = [
'system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach, with a blue sky and a body of water in the background. The cow is brown with a white',
'system\n\nYou are a helpful assistant.user\n\nAre these images identical?assistant\n\nNo, these images are not identical. The first image shows a cow standing on a beach with a blue sky and a white cloud in the background.'
] # fmt: skip
self.assertEqual(output_text, EXPECTED_TEXTS)
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_tar
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""*LJSpeech-1.1* :cite:`ljspeech17` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
_extract_tar(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcript
str:
Normalized Transcript
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
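# Hedged usage sketch (the root path is illustrative, not part of the original
# file; the first run downloads the ~2.6 GB LJSpeech-1.1 archive):
if __name__ == "__main__":
    dataset = LJSPEECH(root="./data", download=True)
    waveform, sample_rate, transcript, normalized_transcript = dataset[0]
    print(waveform.shape, sample_rate, normalized_transcript)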
|
import csv
import os
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""*LJSpeech-1.1* :cite:`ljspeech17` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / "metadata.csv"
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
else:
if not os.path.exists(self._path):
raise RuntimeError(
f"The path {self._path} doesn't exist. "
"Please check the ``root`` path or set `download=True` to download it"
)
with open(self._metadata_path, "r", newline="") as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcript
str:
Normalized Transcript
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=3,
dataset=dict( # RepeatDataset
# the dataset is repeated 10 times, and the training schedule is 2x,
        # so the actual epoch = 24 * 10 = 240.
times=10,
dataset=dict( # ConcatDataset
# VOCDataset will add different `dataset_type` in dataset.metainfo,
# which will get error if using ConcatDataset. Adding
# `ignore_keys` can avoid this error.
ignore_keys=['dataset_type'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 20],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=20, anchor_generator=dict(basesize_ratio_range=(0.2,
0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=3,
dataset=dict( # RepeatDataset
# the dataset is repeated 10 times, and the training schedule is 2x,
        # so the actual epoch = 24 * 10 = 240.
times=10,
dataset=dict( # ConcatDataset
# VOCDataset will add different `DATASET_TYPE` in dataset.metainfo,
# which will get error if using ConcatDataset. Adding
# `ignore_keys` can avoid this error.
ignore_keys=['DATASET_TYPE'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 20],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
import os
import platform
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux") and platform.machine() == "x86_64":
requirements.append("triton==2.0.0")
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.8",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=requirements
+ [
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
|
import os
import platform
import sys
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux") and platform.machine() == "x86_64":
triton_requirement = "triton==2.0.0"
try:
import re
import subprocess
version_line = (
subprocess.check_output(["nvcc", "--version"]).strip().split(b"\n")[-1]
)
major, minor = re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]
if (int(major), int(minor)) < (11, 4):
# the last version supporting CUDA < 11.4
triton_requirement = "triton==2.0.0.dev20221011"
except (IndexError, OSError, subprocess.SubprocessError):
pass
requirements.append(triton_requirement)
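# For reference, a hedged sketch of what the `nvcc --version` parsing above
# extracts (the sample output line is illustrative, not from the original file):
#   version_line = b"Cuda compilation tools, release 11.3, V11.3.109"
#   re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]  ->  (b"11", b"3")
#   (11, 3) < (11, 4), so the older triton build would be pinned in that case.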
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.8",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=requirements
+ [
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_chameleon import *
from .image_processing_chameleon import *
from .image_processing_chameleon_fast import *
from .modeling_chameleon import *
from .processing_chameleon import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_chameleon import *
from .image_processing_chameleon import *
from .modeling_chameleon import *
from .processing_chameleon import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with 2 or more outputs. It measures the
    accuracy of the predicted class vs. the gold labels.
"""
def __init__(self, sentence_pairs: list[list[str]], labels: list[int], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
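# Hedged usage sketch (model name and labels are illustrative, not part of the
# original file; assumes a CrossEncoder checkpoint with >= 2 output labels):
if __name__ == "__main__":
    from sentence_transformers import CrossEncoder

    model = CrossEncoder("cross-encoder/nli-deberta-v3-base")  # 3-class NLI head
    pairs = [["A man is eating food.", "A man is eating pasta."]]
    evaluator = CESoftmaxAccuracyEvaluator(pairs, labels=[1], name="demo")
    accuracy = evaluator(model, output_path=None)
    print(f"accuracy={accuracy:.3f}")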
|
import csv
import logging
import os
from typing import List
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CESoftmaxAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with 2 or more outputs. It measures the
    accuracy of the predicted class vs. the gold labels.
"""
def __init__(self, sentence_pairs: List[List[str]], labels: List[int], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.csv_file = "CESoftmaxAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CESoftmaxAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = np.argmax(pred_scores, axis=1)
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
This example presents how to estimate and visualize the variance of the Receiver
Operating Characteristic (ROC) metric using cross-validation.
ROC curves typically feature true positive rate (TPR) on the Y axis, and false
positive rate (FPR) on the X axis. This means that the top left corner of the
plot is the "ideal" point - a FPR of zero, and a TPR of one. This is not very
realistic, but it does mean that a larger Area Under the Curve (AUC) is usually
better. The "steepness" of ROC curves is also important, since it is ideal to
maximize the TPR while minimizing the FPR.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean AUC, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how different
the splits generated by K-fold cross-validation are from one another.
.. note::
See :ref:`sphx_glr_auto_examples_model_selection_plot_roc.py` for a
complement of the present example explaining the averaging strategies to
generalize the metrics for multiclass classifiers.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Load and prepare data
# =====================
#
# We import the :ref:`iris_dataset` which contains 3 classes, each one
# corresponding to a type of iris plant. One class is linearly separable from
# the other 2; the latter are **not** linearly separable from each other.
#
# In the following we binarize the dataset by dropping the "virginica" class
# (`class_id=2`). This means that the "versicolor" class (`class_id=1`) is
# regarded as the positive class and "setosa" as the negative class
# (`class_id=0`).
import numpy as np
from sklearn.datasets import load_iris
iris = load_iris()
target_names = iris.target_names
X, y = iris.data, iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# %%
# We also add noisy features to make the problem harder.
random_state = np.random.RandomState(0)
X = np.concatenate([X, random_state.randn(n_samples, 200 * n_features)], axis=1)
# %%
# Classification and ROC analysis
# -------------------------------
#
# Here we run a :class:`~sklearn.svm.SVC` classifier with cross-validation and
# plot the ROC curves fold-wise. Notice that the baseline to define the chance
# level (dashed ROC curve) is a classifier that would always predict the most
# frequent class.
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import RocCurveDisplay, auc
from sklearn.model_selection import StratifiedKFold
n_splits = 6
cv = StratifiedKFold(n_splits=n_splits)
classifier = svm.SVC(kernel="linear", probability=True, random_state=random_state)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots(figsize=(6, 6))
for fold, (train, test) in enumerate(cv.split(X, y)):
classifier.fit(X[train], y[train])
viz = RocCurveDisplay.from_estimator(
classifier,
X[test],
y[test],
name=f"ROC fold {fold}",
curve_kwargs=dict(alpha=0.3, lw=1),
ax=ax,
plot_chance_level=(fold == n_splits - 1),
)
interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(viz.roc_auc)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(
mean_fpr,
mean_tpr,
color="b",
label=r"Mean ROC (AUC = %0.2f $\pm$ %0.2f)" % (mean_auc, std_auc),
lw=2,
alpha=0.8,
)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color="grey",
alpha=0.2,
label=r"$\pm$ 1 std. dev.",
)
ax.set(
xlabel="False Positive Rate",
ylabel="True Positive Rate",
title=f"Mean ROC curve with variability\n(Positive label '{target_names[1]}')",
)
ax.legend(loc="lower right")
plt.show()
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
This example presents how to estimate and visualize the variance of the Receiver
Operating Characteristic (ROC) metric using cross-validation.
ROC curves typically feature true positive rate (TPR) on the Y axis, and false
positive rate (FPR) on the X axis. This means that the top left corner of the
plot is the "ideal" point - a FPR of zero, and a TPR of one. This is not very
realistic, but it does mean that a larger Area Under the Curve (AUC) is usually
better. The "steepness" of ROC curves is also important, since it is ideal to
maximize the TPR while minimizing the FPR.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean AUC, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how different
the splits generated by K-fold cross-validation are from one another.
.. note::
See :ref:`sphx_glr_auto_examples_model_selection_plot_roc.py` for a
complement of the present example explaining the averaging strategies to
generalize the metrics for multiclass classifiers.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Load and prepare data
# =====================
#
# We import the :ref:`iris_dataset` which contains 3 classes, each one
# corresponding to a type of iris plant. One class is linearly separable from
# the other 2; the latter are **not** linearly separable from each other.
#
# In the following we binarize the dataset by dropping the "virginica" class
# (`class_id=2`). This means that the "versicolor" class (`class_id=1`) is
# regarded as the positive class and "setosa" as the negative class
# (`class_id=0`).
import numpy as np
from sklearn.datasets import load_iris
iris = load_iris()
target_names = iris.target_names
X, y = iris.data, iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# %%
# We also add noisy features to make the problem harder.
random_state = np.random.RandomState(0)
X = np.concatenate([X, random_state.randn(n_samples, 200 * n_features)], axis=1)
# %%
# Classification and ROC analysis
# -------------------------------
#
# Here we run a :class:`~sklearn.svm.SVC` classifier with cross-validation and
# plot the ROC curves fold-wise. Notice that the baseline to define the chance
# level (dashed ROC curve) is a classifier that would always predict the most
# frequent class.
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import RocCurveDisplay, auc
from sklearn.model_selection import StratifiedKFold
n_splits = 6
cv = StratifiedKFold(n_splits=n_splits)
classifier = svm.SVC(kernel="linear", probability=True, random_state=random_state)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots(figsize=(6, 6))
for fold, (train, test) in enumerate(cv.split(X, y)):
classifier.fit(X[train], y[train])
viz = RocCurveDisplay.from_estimator(
classifier,
X[test],
y[test],
name=f"ROC fold {fold}",
alpha=0.3,
lw=1,
ax=ax,
plot_chance_level=(fold == n_splits - 1),
)
interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(viz.roc_auc)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(
mean_fpr,
mean_tpr,
color="b",
label=r"Mean ROC (AUC = %0.2f $\pm$ %0.2f)" % (mean_auc, std_auc),
lw=2,
alpha=0.8,
)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(
mean_fpr,
tprs_lower,
tprs_upper,
color="grey",
alpha=0.2,
label=r"$\pm$ 1 std. dev.",
)
ax.set(
xlabel="False Positive Rate",
ylabel="True Positive Rate",
title=f"Mean ROC curve with variability\n(Positive label '{target_names[1]}')",
)
ax.legend(loc="lower right")
plt.show()
|
from docarray.base_document.mixins.io import IOMixin
from docarray.base_document.mixins.update import UpdateMixin
__all__ = ['IOMixin', 'UpdateMixin']
|
from docarray.base_document.mixins.proto import ProtoMixin
from docarray.base_document.mixins.update import UpdateMixin
__all__ = ['ProtoMixin', 'UpdateMixin']
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
name = _uppercase_uppercase_re.sub(r"\1_\2", name)
name = _lowercase_uppercase_re.sub(r"\1_\2", name)
return name.lower()
def snakecase_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
name = _single_underscore_re.split(name)
name = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
filepath = os.path.join(data_dir, prefix)
return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
prefix = filename_prefix_for_split(dataset_name, split)
prefix = os.path.join(path, prefix)
if shard_lengths:
num_shards = len(shard_lengths)
filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
if filetype_suffix:
filenames = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
filename = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
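# Hedged usage sketch (dataset name, path, and shard sizes are illustrative):
if __name__ == "__main__":
    # One file per shard, suffixed with the shard index and the total shard count.
    print(filenames_for_dataset_split("/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 200]))
    # ['/data/my_dataset-train-00000-of-00002.arrow', '/data/my_dataset-train-00001-of-00002.arrow']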
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
"""Convert camel-case string to snake-case."""
name = _uppercase_uppercase_re.sub(r"\1_\2", name)
name = _lowercase_uppercase_re.sub(r"\1_\2", name)
return name.lower()
def snakecase_to_camelcase(name):
"""Convert snake-case string to camel-case string."""
name = _single_underscore_re.split(name)
name = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
if os.path.basename(name) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}")
if not re.match(_split_re, split):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
filepath = os.path.join(data_dir, prefix)
return f"{filepath}*"
def filename_for_dataset_split(dataset_name, split, filetype_suffix=None):
prefix = filename_prefix_for_split(dataset_name, split)
if filetype_suffix:
prefix += f".{filetype_suffix}"
return prefix
def filepath_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
filename = filename_for_dataset_split(
dataset_name=dataset_name,
split=split,
filetype_suffix=filetype_suffix,
)
filepath = os.path.join(data_dir, filename)
return filepath
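# Hedged usage sketch of the two helpers added above (values are illustrative):
if __name__ == "__main__":
    print(filename_for_dataset_split("MyDataset", "train", filetype_suffix="arrow"))
    # my_dataset-train.arrow
    print(filepath_for_dataset_split("MyDataset", "train", "/data", filetype_suffix="arrow"))
    # /data/my_dataset-train.arrow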
|
"""
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e., "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(0.5, 0.15), (0.5, 0.6), (0.3, 0.2)]):
for _ in range(30):
phase_noise = 0.01 * np.random.normal()
amplitude_noise = 0.04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < 0.997] = 0
X.append(
12
* (
(a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise)))
+ additional_noise
)
)
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ("Waveform 1", "Waveform 2", "Waveform 3")
colors = ["#f7bd01", "#377eb8", "#f781bf"]
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, color, n in zip(range(n_clusters), colors, labels):
lines = plt.plot(X[y == l].T, c=color, alpha=0.5)
lines[0].set_label(n)
plt.legend(loc="best")
plt.axis("tight")
plt.axis("off")
plt.suptitle("Ground truth", size=20, y=1)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(
X[y == i], X[y == j], metric=metric
).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
t = plt.text(
i,
j,
"%5.3f" % avg_dist[i, j],
verticalalignment="center",
horizontalalignment="center",
)
t.set_path_effects(
[PathEffects.withStroke(linewidth=5, foreground="w", alpha=0.5)]
)
plt.imshow(avg_dist, interpolation="nearest", cmap="cividis", vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18, y=1)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(
n_clusters=n_clusters, linkage="average", metric=metric
)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, color in zip(np.arange(model.n_clusters), colors):
plt.plot(X[model.labels_ == l].T, c=color, alpha=0.5)
plt.axis("tight")
plt.axis("off")
plt.suptitle("AgglomerativeClustering(metric=%s)" % metric, size=20, y=1)
plt.show()
|
"""
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e., "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(0.5, 0.15), (0.5, 0.6), (0.3, 0.2)]):
for _ in range(30):
phase_noise = 0.01 * np.random.normal()
amplitude_noise = 0.04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < 0.997] = 0
X.append(
12
* (
(a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise)))
+ additional_noise
)
)
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ("Waveform 1", "Waveform 2", "Waveform 3")
colors = ["#f7bd01", "#377eb8", "#f781bf"]
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, color, n in zip(range(n_clusters), colors, labels):
lines = plt.plot(X[y == l].T, c=color, alpha=0.5)
lines[0].set_label(n)
plt.legend(loc="best")
plt.axis("tight")
plt.axis("off")
plt.suptitle("Ground truth", size=20, y=1)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(
X[y == i], X[y == j], metric=metric
).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
t = plt.text(
i,
j,
"%5.3f" % avg_dist[i, j],
verticalalignment="center",
horizontalalignment="center",
)
t.set_path_effects(
[PathEffects.withStroke(linewidth=5, foreground="w", alpha=0.5)]
)
plt.imshow(avg_dist, interpolation="nearest", cmap="cividis", vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18, y=1)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(
n_clusters=n_clusters, linkage="average", metric=metric
)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, color in zip(np.arange(model.n_clusters), colors):
plt.plot(X[model.labels_ == l].T, c=color, alpha=0.5)
plt.axis("tight")
plt.axis("off")
plt.suptitle("AgglomerativeClustering(metric=%s)" % metric, size=20, y=1)
plt.show()
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
from ..src.diffusers.utils.constants import DIFFUSERS_REQUEST_TIMEOUT
# Configuration
LIBRARY_NAME = "diffusers"
GITHUB_REPO = "huggingface/diffusers"
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def check_pypi_for_latest_release(library_name):
"""Check PyPI for the latest release of the library."""
response = requests.get(f"https://pypi.org/pypi/{library_name}/json", timeout=DIFFUSERS_REQUEST_TIMEOUT)
if response.status_code == 200:
data = response.json()
return data["info"]["version"]
else:
print("Failed to fetch library details from PyPI.")
return None
def get_github_release_info(github_repo):
"""Fetch the latest release info from GitHub."""
url = f"https://api.github.com/repos/{github_repo}/releases/latest"
response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT)
if response.status_code == 200:
data = response.json()
return {"tag_name": data["tag_name"], "url": data["html_url"], "release_time": data["published_at"]}
else:
print("Failed to fetch release info from GitHub.")
return None
def notify_slack(webhook_url, library_name, version, release_info):
"""Send a notification to a Slack channel."""
message = (
f"🚀 New release for {library_name} available: version **{version}** 🎉\n"
f"📜 Release Notes: {release_info['url']}\n"
f"⏱️ Release time: {release_info['release_time']}"
)
payload = {"text": message}
response = requests.post(webhook_url, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
def main():
latest_version = check_pypi_for_latest_release(LIBRARY_NAME)
release_info = get_github_release_info(GITHUB_REPO)
    parsed_version = release_info["tag_name"].replace("v", "") if release_info else None
if latest_version and release_info and latest_version == parsed_version:
notify_slack(SLACK_WEBHOOK_URL, LIBRARY_NAME, latest_version, release_info)
else:
print(f"{latest_version=}, {release_info=}, {parsed_version=}")
raise ValueError("There were some problems.")
if __name__ == "__main__":
main()
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
# Configuration
LIBRARY_NAME = "diffusers"
GITHUB_REPO = "huggingface/diffusers"
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def check_pypi_for_latest_release(library_name):
"""Check PyPI for the latest release of the library."""
response = requests.get(f"https://pypi.org/pypi/{library_name}/json")
if response.status_code == 200:
data = response.json()
return data["info"]["version"]
else:
print("Failed to fetch library details from PyPI.")
return None
def get_github_release_info(github_repo):
"""Fetch the latest release info from GitHub."""
url = f"https://api.github.com/repos/{github_repo}/releases/latest"
response = requests.get(url)
if response.status_code == 200:
data = response.json()
return {"tag_name": data["tag_name"], "url": data["html_url"], "release_time": data["published_at"]}
else:
print("Failed to fetch release info from GitHub.")
return None
def notify_slack(webhook_url, library_name, version, release_info):
"""Send a notification to a Slack channel."""
message = (
f"🚀 New release for {library_name} available: version **{version}** 🎉\n"
f"📜 Release Notes: {release_info['url']}\n"
f"⏱️ Release time: {release_info['release_time']}"
)
payload = {"text": message}
response = requests.post(webhook_url, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
def main():
latest_version = check_pypi_for_latest_release(LIBRARY_NAME)
release_info = get_github_release_info(GITHUB_REPO)
    parsed_version = release_info["tag_name"].replace("v", "") if release_info else None
if latest_version and release_info and latest_version == parsed_version:
notify_slack(SLACK_WEBHOOK_URL, LIBRARY_NAME, latest_version, release_info)
else:
print(f"{latest_version=}, {release_info=}, {parsed_version=}")
raise ValueError("There were some problems.")
if __name__ == "__main__":
main()
|
"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format.
"""
import logging
from torch.nn import Module
from ..model import wav2vec2_model, Wav2Vec2Model
_LG = logging.getLogger(__name__)
def _get_config(cfg):
config = {
"extractor_mode": f"{cfg.feat_extract_norm}_norm",
"extractor_conv_layer_config": list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)),
"extractor_conv_bias": cfg.conv_bias,
"encoder_embed_dim": cfg.hidden_size,
"encoder_projection_dropout": cfg.feat_proj_dropout,
"encoder_pos_conv_kernel": cfg.num_conv_pos_embeddings,
"encoder_pos_conv_groups": cfg.num_conv_pos_embedding_groups,
"encoder_num_layers": cfg.num_hidden_layers,
"encoder_num_heads": cfg.num_attention_heads,
"encoder_attention_dropout": cfg.attention_dropout,
"encoder_ff_interm_features": cfg.intermediate_size,
"encoder_ff_interm_dropout": cfg.activation_dropout,
"encoder_dropout": cfg.hidden_dropout,
"encoder_layer_norm_first": cfg.do_stable_layer_norm,
"encoder_layer_drop": cfg.layerdrop,
}
return config
def _build(config, original):
if original.__class__.__name__ == "Wav2Vec2ForCTC":
aux_num_out = original.config.vocab_size
wav2vec2 = original.wav2vec2
else:
_LG.warning("The model is not an instance of Wav2Vec2ForCTC. " '"lm_head" module is not imported.')
aux_num_out = None
wav2vec2 = original
imported = wav2vec2_model(**config, aux_num_out=aux_num_out)
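    # The two implementations nest modules differently (e.g. feature_projection lives under encoder in torchaudio), so weights are copied submodule by submodule.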
imported.feature_extractor.load_state_dict(wav2vec2.feature_extractor.state_dict())
imported.encoder.feature_projection.load_state_dict(wav2vec2.feature_projection.state_dict())
imported.encoder.transformer.load_state_dict(wav2vec2.encoder.state_dict())
if original.__class__.__name__ == "Wav2Vec2ForCTC":
imported.aux.load_state_dict(original.lm_head.state_dict())
return imported
def import_huggingface_model(original: Module) -> Wav2Vec2Model:
"""Builds :class:`Wav2Vec2Model` from the corresponding model object of
`Transformers <https://huggingface.co/transformers/>`_.
Args:
original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``.
Returns:
Wav2Vec2Model: Imported model.
Example
>>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
>>>
>>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
>>> model = import_huggingface_model(original)
>>>
>>> waveforms, _ = torchaudio.load("audio.wav")
>>> logits, _ = model(waveforms)
"""
_LG.info("Importing model.")
_LG.info("Loading model configuration.")
config = _get_config(original.config)
_LG.debug(" - config: %s", config)
_LG.info("Building model.")
imported = _build(config, original)
return imported
|
"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format.
"""
import logging
from torch.nn import Module
from ..model import wav2vec2_model, Wav2Vec2Model
_LG = logging.getLogger(__name__)
def _get_config(cfg):
config = {
"extractor_mode": f"{cfg.feat_extract_norm}_norm",
"extractor_conv_layer_config": list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)),
"extractor_conv_bias": cfg.conv_bias,
"encoder_embed_dim": cfg.hidden_size,
"encoder_projection_dropout": cfg.feat_proj_dropout,
"encoder_pos_conv_kernel": cfg.num_conv_pos_embeddings,
"encoder_pos_conv_groups": cfg.num_conv_pos_embedding_groups,
"encoder_num_layers": cfg.num_hidden_layers,
"encoder_num_heads": cfg.num_attention_heads,
"encoder_attention_dropout": cfg.attention_dropout,
"encoder_ff_interm_features": cfg.intermediate_size,
"encoder_ff_interm_dropout": cfg.activation_dropout,
"encoder_dropout": cfg.hidden_dropout,
"encoder_layer_norm_first": cfg.do_stable_layer_norm,
"encoder_layer_drop": cfg.layerdrop,
}
return config
def _build(config, original):
if original.__class__.__name__ == "Wav2Vec2ForCTC":
aux_num_out = original.config.vocab_size
wav2vec2 = original.wav2vec2
else:
_LG.warning("The model is not an instance of Wav2Vec2ForCTC. " '"lm_head" module is not imported.')
aux_num_out = None
wav2vec2 = original
imported = wav2vec2_model(**config, aux_num_out=aux_num_out)
imported.feature_extractor.load_state_dict(wav2vec2.feature_extractor.state_dict())
imported.encoder.feature_projection.load_state_dict(wav2vec2.feature_projection.state_dict())
imported.encoder.transformer.load_state_dict(wav2vec2.encoder.state_dict())
if original.__class__.__name__ == "Wav2Vec2ForCTC":
imported.aux.load_state_dict(original.lm_head.state_dict())
return imported
def import_huggingface_model(original: Module) -> Wav2Vec2Model:
"""Build Wav2Vec2Model from the corresponding model object of Hugging Face's `Transformers`_.
Args:
original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``.
Returns:
Wav2Vec2Model: Imported model.
Example
>>> from torchaudio.models.wav2vec2.utils import import_huggingface_model
>>>
>>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
>>> model = import_huggingface_model(original)
>>>
>>> waveforms, _ = torchaudio.load("audio.wav")
>>> logits, _ = model(waveforms)
.. _Transformers: https://huggingface.co/transformers/
"""
_LG.info("Importing model.")
_LG.info("Loading model configuration.")
config = _get_config(original.config)
_LG.debug(" - config: %s", config)
_LG.info("Building model.")
imported = _build(config, original)
return imported
|
"""Read PDF files using PyMuPDF library."""
from pathlib import Path
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PyMuPDFReader(BaseReader):
"""Read PDF files using PyMuPDF library."""
def load_data(
self,
file_path: Union[Path, str],
metadata: bool = True,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""Loads list of documents from PDF file and also accepts extra information in dict format."""
return self.load(file_path, metadata=metadata, extra_info=extra_info)
def load(
self,
file_path: Union[Path, str],
metadata: bool = True,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""
Loads list of documents from PDF file and also accepts extra information in dict format.
Args:
file_path (Union[Path, str]): file path of PDF file (accepts string or Path).
metadata (bool, optional): if metadata to be included or not. Defaults to True.
extra_info (Optional[Dict], optional): extra information related to each document in dict format. Defaults to None.
Raises:
TypeError: if extra_info is not a dictionary.
TypeError: if file_path is not a string or Path.
Returns:
List[Document]: list of documents.
"""
import fitz
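        # PyMuPDF is imported lazily so the reader class can be instantiated without the dependency installed.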
# check if file_path is a string or Path
if not isinstance(file_path, str) and not isinstance(file_path, Path):
raise TypeError("file_path must be a string or Path.")
# open PDF file
doc = fitz.open(file_path)
# if extra_info is not None, check if it is a dictionary
if extra_info:
if not isinstance(extra_info, dict):
raise TypeError("extra_info must be a dictionary.")
# if metadata is True, add metadata to each document
if metadata:
if not extra_info:
extra_info = {}
extra_info["total_pages"] = len(doc)
extra_info["file_path"] = str(file_path)
# return list of documents
return [
Document(
text=page.get_text().encode("utf-8"),
extra_info=dict(
extra_info,
**{
"source": f"{page.number + 1}",
},
),
)
for page in doc
]
else:
return [
Document(
text=page.get_text().encode("utf-8"), extra_info=extra_info or {}
)
for page in doc
]
|
"""Read PDF files using PyMuPDF library."""
from pathlib import Path
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PyMuPDFReader(BaseReader):
"""Read PDF files using PyMuPDF library."""
def load_data(
self,
file_path: Union[Path, str],
metadata: bool = True,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""Loads list of documents from PDF file and also accepts extra information in dict format."""
return self.load(file_path, metadata=metadata, extra_info=extra_info)
def load(
self,
file_path: Union[Path, str],
metadata: bool = True,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""
Loads list of documents from PDF file and also accepts extra information in dict format.
Args:
file_path (Union[Path, str]): file path of PDF file (accepts string or Path).
metadata (bool, optional): if metadata to be included or not. Defaults to True.
extra_info (Optional[Dict], optional): extra information related to each document in dict format. Defaults to None.
Raises:
TypeError: if extra_info is not a dictionary.
TypeError: if file_path is not a string or Path.
Returns:
List[Document]: list of documents.
"""
import fitz
# check if file_path is a string or Path
if not isinstance(file_path, str) and not isinstance(file_path, Path):
raise TypeError("file_path must be a string or Path.")
# open PDF file
doc = fitz.open(file_path)
# if extra_info is not None, check if it is a dictionary
if extra_info:
if not isinstance(extra_info, dict):
raise TypeError("extra_info must be a dictionary.")
# if metadata is True, add metadata to each document
if metadata:
if not extra_info:
extra_info = {}
extra_info["total_pages"] = len(doc)
extra_info["file_path"] = str(file_path)
# return list of documents
return [
Document(
text=page.get_text().encode("utf-8"),
extra_info=dict(
extra_info,
**{
"source": f"{page.number+1}",
},
),
)
for page in doc
]
else:
return [
Document(
text=page.get_text().encode("utf-8"), extra_info=extra_info or {}
)
for page in doc
]
|
import torch
from parameterized import parameterized
from torchaudio.prototype.models import squim_objective_base
from torchaudio_unittest.common_utils import skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSQUIM(TorchaudioTestCase):
def _smoke_test_objective(self, model, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
batch_size, num_frames = 3, 16000
waveforms = torch.randn(batch_size, num_frames, device=device, dtype=dtype)
model(waveforms)
@parameterized.expand([(torch.float32,), (torch.float64,)])
def test_cpu_smoke_test(self, dtype):
model = squim_objective_base()
self._smoke_test_objective(model, torch.device("cpu"), dtype)
@parameterized.expand([(torch.float32,), (torch.float64,)])
@skipIfNoCuda
def test_cuda_smoke_test(self, dtype):
model = squim_objective_base()
self._smoke_test_objective(model, torch.device("cuda"), dtype)
def test_batch_consistency(self):
model = squim_objective_base()
model.eval()
batch_size, num_frames = 3, 16000
waveforms = torch.randn(batch_size, num_frames)
ref_scores = model(waveforms)
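        # Score each waveform individually and check the results match the batched outputs.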
        hyp_scores = [torch.zeros(batch_size) for _ in range(3)]
for i in range(batch_size):
scores = model(waveforms[i : i + 1])
for j in range(3):
hyp_scores[j][i] = scores[j]
self.assertEqual(len(hyp_scores), len(ref_scores))
for i in range(len(ref_scores)):
self.assertEqual(hyp_scores[i], ref_scores[i])
def test_torchscript_consistency(self):
model = squim_objective_base()
model.eval()
batch_size, num_frames = 3, 16000
waveforms = torch.randn(batch_size, num_frames)
ref_scores = model(waveforms)
scripted = torch_script(model)
hyp_scores = scripted(waveforms)
self.assertEqual(len(hyp_scores), len(ref_scores))
for i in range(len(ref_scores)):
self.assertEqual(hyp_scores[i], ref_scores[i])
|
import torch
from parameterized import parameterized
from torchaudio.prototype.models import squim_objective_base
from torchaudio_unittest.common_utils import skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSQUIM(TorchaudioTestCase):
def _smoke_test_objective(self, model, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
batch_size, num_frames = 3, 16000
waveforms = torch.randn(batch_size, num_frames, device=device, dtype=dtype)
model(waveforms)
@parameterized.expand([(torch.float32,), (torch.float64,)])
def test_cpu_smoke_test(self, dtype):
model = squim_objective_base()
self._smoke_test_objective(model, torch.device("cpu"), dtype)
@parameterized.expand([(torch.float32,), (torch.float64,)])
@skipIfNoCuda
def test_cuda_smoke_test(self, dtype):
model = squim_objective_base()
self._smoke_test_objective(model, torch.device("cuda"), dtype)
def test_torchscript_consistency(self):
model = squim_objective_base()
model.eval()
batch_size, num_frames = 3, 16000
waveforms = torch.randn(batch_size, num_frames)
ref_scores = model(waveforms)
scripted = torch_script(model)
hyp_scores = scripted(waveforms)
self.assertEqual(len(hyp_scores), len(ref_scores))
for i in range(len(ref_scores)):
self.assertEqual(hyp_scores[i], ref_scores[i])
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.metrics import deserialize
from keras.src.metrics import get
from keras.src.metrics import serialize
from keras.src.metrics.accuracy_metrics import Accuracy
from keras.src.metrics.accuracy_metrics import BinaryAccuracy
from keras.src.metrics.accuracy_metrics import CategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import binary_accuracy
from keras.src.metrics.accuracy_metrics import categorical_accuracy
from keras.src.metrics.accuracy_metrics import sparse_categorical_accuracy
from keras.src.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy
from keras.src.metrics.accuracy_metrics import top_k_categorical_accuracy
from keras.src.metrics.confusion_metrics import AUC
from keras.src.metrics.confusion_metrics import FalseNegatives
from keras.src.metrics.confusion_metrics import FalsePositives
from keras.src.metrics.confusion_metrics import Precision
from keras.src.metrics.confusion_metrics import PrecisionAtRecall
from keras.src.metrics.confusion_metrics import Recall
from keras.src.metrics.confusion_metrics import RecallAtPrecision
from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity
from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity
from keras.src.metrics.confusion_metrics import TrueNegatives
from keras.src.metrics.confusion_metrics import TruePositives
from keras.src.metrics.correlation_metrics import ConcordanceCorrelation
from keras.src.metrics.correlation_metrics import PearsonCorrelation
from keras.src.metrics.correlation_metrics import concordance_correlation
from keras.src.metrics.correlation_metrics import pearson_correlation
from keras.src.metrics.f_score_metrics import F1Score
from keras.src.metrics.f_score_metrics import FBetaScore
from keras.src.metrics.hinge_metrics import CategoricalHinge
from keras.src.metrics.hinge_metrics import Hinge
from keras.src.metrics.hinge_metrics import SquaredHinge
from keras.src.metrics.iou_metrics import BinaryIoU
from keras.src.metrics.iou_metrics import IoU
from keras.src.metrics.iou_metrics import MeanIoU
from keras.src.metrics.iou_metrics import OneHotIoU
from keras.src.metrics.iou_metrics import OneHotMeanIoU
from keras.src.metrics.metric import Metric
from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy
from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy
from keras.src.metrics.probabilistic_metrics import KLDivergence
from keras.src.metrics.probabilistic_metrics import Poisson
from keras.src.metrics.probabilistic_metrics import (
SparseCategoricalCrossentropy,
)
from keras.src.metrics.reduction_metrics import Mean
from keras.src.metrics.reduction_metrics import MeanMetricWrapper
from keras.src.metrics.reduction_metrics import Sum
from keras.src.metrics.regression_metrics import CosineSimilarity
from keras.src.metrics.regression_metrics import LogCoshError
from keras.src.metrics.regression_metrics import MeanAbsoluteError
from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError
from keras.src.metrics.regression_metrics import MeanSquaredError
from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError
from keras.src.metrics.regression_metrics import R2Score
from keras.src.metrics.regression_metrics import RootMeanSquaredError
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.metrics import deserialize
from keras.src.metrics import get
from keras.src.metrics import serialize
from keras.src.metrics.accuracy_metrics import Accuracy
from keras.src.metrics.accuracy_metrics import BinaryAccuracy
from keras.src.metrics.accuracy_metrics import CategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import SparseTopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import TopKCategoricalAccuracy
from keras.src.metrics.accuracy_metrics import binary_accuracy
from keras.src.metrics.accuracy_metrics import categorical_accuracy
from keras.src.metrics.accuracy_metrics import sparse_categorical_accuracy
from keras.src.metrics.accuracy_metrics import sparse_top_k_categorical_accuracy
from keras.src.metrics.accuracy_metrics import top_k_categorical_accuracy
from keras.src.metrics.confusion_metrics import AUC
from keras.src.metrics.confusion_metrics import FalseNegatives
from keras.src.metrics.confusion_metrics import FalsePositives
from keras.src.metrics.confusion_metrics import Precision
from keras.src.metrics.confusion_metrics import PrecisionAtRecall
from keras.src.metrics.confusion_metrics import Recall
from keras.src.metrics.confusion_metrics import RecallAtPrecision
from keras.src.metrics.confusion_metrics import SensitivityAtSpecificity
from keras.src.metrics.confusion_metrics import SpecificityAtSensitivity
from keras.src.metrics.confusion_metrics import TrueNegatives
from keras.src.metrics.confusion_metrics import TruePositives
from keras.src.metrics.f_score_metrics import F1Score
from keras.src.metrics.f_score_metrics import FBetaScore
from keras.src.metrics.hinge_metrics import CategoricalHinge
from keras.src.metrics.hinge_metrics import Hinge
from keras.src.metrics.hinge_metrics import SquaredHinge
from keras.src.metrics.iou_metrics import BinaryIoU
from keras.src.metrics.iou_metrics import IoU
from keras.src.metrics.iou_metrics import MeanIoU
from keras.src.metrics.iou_metrics import OneHotIoU
from keras.src.metrics.iou_metrics import OneHotMeanIoU
from keras.src.metrics.metric import Metric
from keras.src.metrics.probabilistic_metrics import BinaryCrossentropy
from keras.src.metrics.probabilistic_metrics import CategoricalCrossentropy
from keras.src.metrics.probabilistic_metrics import KLDivergence
from keras.src.metrics.probabilistic_metrics import Poisson
from keras.src.metrics.probabilistic_metrics import (
SparseCategoricalCrossentropy,
)
from keras.src.metrics.reduction_metrics import Mean
from keras.src.metrics.reduction_metrics import MeanMetricWrapper
from keras.src.metrics.reduction_metrics import Sum
from keras.src.metrics.regression_metrics import CosineSimilarity
from keras.src.metrics.regression_metrics import LogCoshError
from keras.src.metrics.regression_metrics import MeanAbsoluteError
from keras.src.metrics.regression_metrics import MeanAbsolutePercentageError
from keras.src.metrics.regression_metrics import MeanSquaredError
from keras.src.metrics.regression_metrics import MeanSquaredLogarithmicError
from keras.src.metrics.regression_metrics import R2Score
from keras.src.metrics.regression_metrics import RootMeanSquaredError
|
import pathlib
from argparse import ArgumentParser
def main(args):
wheel_path = pathlib.Path(args.wheel_path).expanduser().resolve()
if not wheel_path.exists():
raise ValueError(f"Wheel cannot be found at path {wheel_path}")
if not wheel_path.is_file():
raise ValueError(f"Path {wheel_path} is not a valid file")
wheel_dir, wheel_name = wheel_path.parent, wheel_path.name
tokens = wheel_name.split("-")
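    # Expect the canonical five dash-separated fields of a wheel name: name, version, python tag, ABI tag, platform tag.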
assert len(tokens) == 5
version = tokens[1].split("+")[0]
keywords = {
"pkg_name": tokens[0],
"version": version,
"commit_id": args.commit_hash,
"platform_tag": args.platform_tag,
}
new_wheel_name = (
"{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl".format(
**keywords
)
)
new_wheel_path = wheel_dir / new_wheel_name
print(f"Renaming {wheel_name} to {new_wheel_name}...")
if new_wheel_name == wheel_name:
print("Skipping, as the old name is identical to the new name.")
else:
if new_wheel_path.is_file():
new_wheel_path.unlink()
wheel_path.rename(new_wheel_path)
filesize = new_wheel_path.stat().st_size / 1024 / 1024 # MiB
print(f"Wheel size: {filesize:.2f} MiB")
if filesize > 300:
raise RuntimeError(
f"Limit of wheel size set by PyPI is exceeded. {new_wheel_name}: {filesize:.2f} MiB"
)
if __name__ == "__main__":
parser = ArgumentParser(
description="Format a Python wheel's name using the git commit hash and platform tag"
)
parser.add_argument(
"--wheel-path", type=str, required=True, help="Path to the wheel"
)
parser.add_argument(
"--commit-hash", type=str, required=True, help="Git commit hash"
)
parser.add_argument(
"--platform-tag",
type=str,
required=True,
help="Platform tag (e.g. manylinux_2_28_x86_64)",
)
parsed_args = parser.parse_args()
main(parsed_args)
|
import pathlib
from argparse import ArgumentParser
def main(args):
wheel_path = pathlib.Path(args.wheel_path).expanduser().resolve()
if not wheel_path.exists():
raise ValueError(f"Wheel cannot be found at path {wheel_path}")
if not wheel_path.is_file():
raise ValueError(f"Path {wheel_path} is not a valid file")
wheel_dir, wheel_name = wheel_path.parent, wheel_path.name
tokens = wheel_name.split("-")
assert len(tokens) == 5
version = tokens[1].split("+")[0]
keywords = {
"pkg_name": tokens[0],
"version": version,
"commit_id": args.commit_hash,
"platform_tag": args.platform_tag,
}
new_wheel_name = (
"{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl".format(
**keywords
)
)
new_wheel_path = wheel_dir / new_wheel_name
print(f"Renaming {wheel_name} to {new_wheel_name}...")
if new_wheel_path.is_file():
new_wheel_path.unlink()
wheel_path.rename(new_wheel_path)
filesize = new_wheel_path.stat().st_size / 1024 / 1024 # MiB
print(f"Wheel size: {filesize:.2f} MiB")
if filesize > 300:
raise RuntimeError(
f"Limit of wheel size set by PyPI is exceeded. {new_wheel_name}: {filesize:.2f} MiB"
)
if __name__ == "__main__":
parser = ArgumentParser(
description="Format a Python wheel's name using the git commit hash and platform tag"
)
parser.add_argument(
"--wheel-path", type=str, required=True, help="Path to the wheel"
)
parser.add_argument(
"--commit-hash", type=str, required=True, help="Git commit hash"
)
parser.add_argument(
"--platform-tag",
type=str,
required=True,
help="Platform tag (e.g. manylinux2014_x86_64)",
)
parsed_args = parser.parse_args()
main(parsed_args)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.4.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.4.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
import subprocess
import pytest
from dpr_text import DPRTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=DPRTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
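        # The executor serves until killed, so hitting the timeout means the container started successfully.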
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"cuda"',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from dpr_text import DPRTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=DPRTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
from typing import List, Optional
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.schema import NodeWithScore, QueryType
from llama_index.core.bridge.pydantic import ConfigDict
class ReRankStartEvent(BaseEvent):
"""
ReRankStartEvent.
Args:
query (QueryType): Query as a string or query bundle.
nodes (List[NodeWithScore]): List of nodes with scores.
top_n (int): Number of nodes to return after rerank.
model_name (str): Name of the model used for reranking.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
query: Optional[QueryType]
nodes: List[NodeWithScore]
top_n: int
model_name: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "ReRankStartEvent"
class ReRankEndEvent(BaseEvent):
"""
ReRankEndEvent.
Args:
nodes (List[NodeWithScore]): List of returned nodes after rerank.
"""
nodes: List[NodeWithScore]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "ReRankEndEvent"
|
from typing import List, Optional
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.schema import NodeWithScore, QueryType
from llama_index.core.bridge.pydantic import ConfigDict
class ReRankStartEvent(BaseEvent):
"""ReRankStartEvent.
Args:
query (QueryType): Query as a string or query bundle.
nodes (List[NodeWithScore]): List of nodes with scores.
top_n (int): Number of nodes to return after rerank.
model_name (str): Name of the model used for reranking.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
query: Optional[QueryType]
nodes: List[NodeWithScore]
top_n: int
model_name: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "ReRankStartEvent"
class ReRankEndEvent(BaseEvent):
"""ReRankEndEvent.
Args:
nodes (List[NodeWithScore]): List of returned nodes after rerank.
"""
nodes: List[NodeWithScore]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "ReRankEndEvent"
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
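# Extend the key map so the norm layer in every Swin block and downsample layer also gets the backbone multipliers.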
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor and micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
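            # e.g. '0rc1' is split into the integer micro version 0 and the suffix 'rc1'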
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor and micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_pbi_agent",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.base import create_pbi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_pbi_agent": "langchain_community.agent_toolkits.powerbi.base"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_pbi_agent",
]
|
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(resource_name: str, action_name: str, max_retry: int = 5):
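    # One ID per decorated function, so log lines from the same connection can be correlated across retries.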
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.error(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
# Define the retrying strategy
retrying_func = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=1, min=1, max=30),
before_sleep=on_retry,
reraise=True,
)(func)
try:
result = retrying_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return wrapper
return decorator
|
import logging
import os
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(resource_name: str, action_name: str, max_retry: int = 5):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.info(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
# Define the retrying strategy
retrying_func = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=1, min=1, max=30),
before_sleep=on_retry,
reraise=True,
)(func)
try:
result = retrying_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return wrapper
return decorator
|
__version__ = '0.16.3'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.16.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
class DataAdapter:
"""Base class for input data adapters.
    The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields NumPy
arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_tf_dataset(self):
"""Get a `tf.data.Dataset` instance for the DataAdapter.
Note that the dataset returned does not repeat for epoch, so caller
might need to create new iterator for the same dataset at the beginning
of the epoch. This behavior might change in the future.
Returns:
A `tf.data.Dataset`. Caller might use the dataset in different
context, e.g. iter(dataset) in eager to get the value directly, or
in graph mode, provide the iterator tensor to Keras model function.
"""
raise NotImplementedError
def get_jax_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields arrays that
that can be fed to JAX. NumPy arrays are preferred for performance.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_torch_dataloader(self):
"""Get a Torch `DataLoader` for the `DataAdapter`.
Returns:
A Torch `DataLoader`.
"""
raise NotImplementedError
@property
def num_batches(self):
"""Return the size (number of batches) for the dataset created.
        For certain types of data input, the number of batches is known, e.g.
        for NumPy data, the size is the same as (number_of_elements / batch_size),
        whereas for a dataset or Python generator the size is unknown, since it
        may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is
unknown. The caller could use this to control the loop of training,
show progress bar, or handle unexpected StopIteration error.
"""
raise NotImplementedError
@property
def batch_size(self):
"""Return the batch size of the dataset created.
        For certain types of data input, the batch size is known, and even
        required, e.g. for a NumPy array. Whereas for a dataset, the batch
        size is unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
@property
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@property
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
def on_epoch_begin(self):
"""A hook called before each epoch."""
pass
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
|
class DataAdapter:
"""Base class for input data adapters.
    The purpose of a DataAdapter is to provide a unified interface to
iterate over input data provided in a variety of formats -- such as
NumPy arrays, tf.Tensors, tf.data.Datasets, Keras PyDatasets, etc.
"""
def get_numpy_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields NumPy
arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_tf_dataset(self):
"""Get a `tf.data.Dataset` instance for the DataAdapter.
Note that the dataset returned does not repeat for epoch, so caller
might need to create new iterator for the same dataset at the beginning
of the epoch. This behavior might change in the future.
Returns:
A `tf.data.Dataset`. Caller might use the dataset in different
context, e.g. iter(dataset) in eager to get the value directly, or
in graph mode, provide the iterator tensor to Keras model function.
"""
raise NotImplementedError
def get_jax_iterator(self):
"""Get a Python iterable for the `DataAdapter`, that yields JAX arrays.
Returns:
A Python iterator.
"""
raise NotImplementedError
def get_torch_dataloader(self):
"""Get a Torch `DataLoader` for the `DataAdapter`.
Returns:
A Torch `DataLoader`.
"""
raise NotImplementedError
@property
def num_batches(self):
"""Return the size (number of batches) for the dataset created.
        For certain types of data input, the number of batches is known, e.g.
        for NumPy data, the size is the same as (number_of_elements / batch_size),
        whereas for a dataset or Python generator the size is unknown, since it
        may or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is
unknown. The caller could use this to control the loop of training,
show progress bar, or handle unexpected StopIteration error.
"""
raise NotImplementedError
@property
def batch_size(self):
"""Return the batch size of the dataset created.
        For certain types of data input, the batch size is known, and even
        required, e.g. for a NumPy array. Whereas for a dataset, the batch
        size is unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
@property
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@property
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
def on_epoch_begin(self):
"""A hook called before each epoch."""
pass
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
|
from typing import Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel
_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.
If a property is not present and is not required in the function parameters, do not include it in the output.""" # noqa: E501
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
"Please follow our extraction use case documentation for more guidelines"
"on how to do information extraction with LLMs."
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"with_structured_output does not currently support a list of pydantic schemas. "
"If this is a blocker or if you notice other issues, please provide "
"feedback here:"
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
    # Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
)
def create_extraction_chain_pydantic(
pydantic_schemas: Union[list[type[BaseModel]], type[BaseModel]],
llm: BaseLanguageModel,
system_message: str = _EXTRACTION_TEMPLATE,
) -> Runnable:
"""Creates a chain that extracts information from a passage.
Args:
pydantic_schemas: The schema of the entities to extract.
llm: The language model to use.
system_message: The system message to use for extraction.
Returns:
A runnable that extracts information from a passage.
"""
if not isinstance(pydantic_schemas, list):
pydantic_schemas = [pydantic_schemas]
prompt = ChatPromptTemplate.from_messages(
[
("system", system_message),
("user", "{input}"),
],
)
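    # Convert each Pydantic schema into an OpenAI function definition and bind them as tools the model may call.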
functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
tools = [{"type": "function", "function": d} for d in functions]
model = llm.bind(tools=tools)
return prompt | model | PydanticToolsParser(tools=pydantic_schemas)
|
from typing import Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel
_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.
If a property is not present and is not required in the function parameters, do not include it in the output.""" # noqa: E501
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
"Please follow our extraction use case documentation for more guidelines"
"on how to do information extraction with LLMs."
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"with_structured_output does not currently support a list of pydantic schemas. "
"If this is a blocker or if you notice other issues, please provide "
"feedback here:"
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
    # Please refer to the documentation of structured_output
# to see an up to date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
)
def create_extraction_chain_pydantic(
pydantic_schemas: Union[list[type[BaseModel]], type[BaseModel]],
llm: BaseLanguageModel,
system_message: str = _EXTRACTION_TEMPLATE,
) -> Runnable:
"""Creates a chain that extracts information from a passage.
Args:
pydantic_schemas: The schema of the entities to extract.
llm: The language model to use.
system_message: The system message to use for extraction.
Returns:
A runnable that extracts information from a passage.
"""
if not isinstance(pydantic_schemas, list):
pydantic_schemas = [pydantic_schemas]
prompt = ChatPromptTemplate.from_messages(
[
("system", system_message),
("user", "{input}"),
]
)
functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
tools = [{"type": "function", "function": d} for d in functions]
model = llm.bind(tools=tools)
return prompt | model | PydanticToolsParser(tools=pydantic_schemas)
|
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 16; for every positive example we include 8-1=7 negative examples
# Sentences are truncated to 75 word pieces
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
max_seq_length = 75
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
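            # Each line holds four tab-separated fields: query id, relevant ids, candidate ids and BM25 scores (the id lists are space-separated).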
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(sentence)
logging.info(f"{len(train_sentences)} train sentences")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
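# With pos_neg_ratio=8, one in eight pairs in a batch is a positive (identical-sentence) pair: 2 positives and 14 negatives per batch of 16.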
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
    epochs=num_epochs,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 16; for every positive example we include 8-1=7 negative examples
# Sentences are truncated to 75 word pieces
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8  # batch_size must be divisible by pos_neg_ratio
max_seq_length = 75
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(sentence)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
# Create the dev & test evaluators
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
    use_amp=False,  # Set to True if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
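For reference, a minimal inference sketch of how a model saved by the script above might be queried afterwards; the model path and example sentences below are hypothetical, and `util.cos_sim` is assumed to be available in the installed sentence-transformers version.

from sentence_transformers import SentenceTransformer, util

# load the model saved at the end of the training script (hypothetical path)
model = SentenceTransformer("output/train_askubuntu_ct-distilbert-base-uncased-16-latest")
query_emb = model.encode("how do I install a deb file?", convert_to_tensor=True)
candidate_embs = model.encode(
    ["installing .deb packages on ubuntu", "change the desktop wallpaper"],
    convert_to_tensor=True,
)
print(util.cos_sim(query_emb, candidate_embs))  # higher score = more relevant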
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray
def test_set_tensor():
class MyDocument(BaseDoc):
tensor: NdArray
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
|
import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray
def test_set_tensor():
class MyDocument(BaseDoc):
tensor: NdArray
d = MyDocument(tensor=np.zeros((3, 224, 224)))
assert isinstance(d.tensor, NdArray)
assert isinstance(d.tensor, np.ndarray)
assert (d.tensor == np.zeros((3, 224, 224))).all()
|
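The test above covers plain tensor assignment; as a side note, a shape-parametrized variant is also possible. A small sketch, assuming the installed docarray version supports the `NdArray[...]` syntax:

import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray

class ImageDoc(BaseDoc):
    # shape-parametrized tensor type: validation fails for mismatched shapes
    tensor: NdArray[3, 224, 224]

doc = ImageDoc(tensor=np.zeros((3, 224, 224)))  # OK
# ImageDoc(tensor=np.zeros((3, 64, 64)))  # would raise a validation error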
import argparse
import os.path as osp
from mmengine.fileio import dump, load
def parse_args():
parser = argparse.ArgumentParser(
description='Generate COCO test image information '
'for COCO panoptic segmentation.')
parser.add_argument('data_root', help='Path to COCO annotation directory.')
args = parser.parse_args()
return args
def main():
args = parse_args()
data_root = args.data_root
val_info = load(osp.join(data_root, 'panoptic_val2017.json'))
test_old_info = load(osp.join(data_root, 'image_info_test-dev2017.json'))
# replace categories from image_info_test-dev2017.json
# with categories from panoptic_val2017.json which
# has attribute `isthing`.
test_info = test_old_info
test_info.update({'categories': val_info['categories']})
dump(test_info, osp.join(data_root,
'panoptic_image_info_test-dev2017.json'))
if __name__ == '__main__':
main()
|
import argparse
import os.path as osp
import mmcv
def parse_args():
parser = argparse.ArgumentParser(
description='Generate COCO test image information '
'for COCO panoptic segmentation.')
parser.add_argument('data_root', help='Path to COCO annotation directory.')
args = parser.parse_args()
return args
def main():
args = parse_args()
data_root = args.data_root
val_info = mmcv.load(osp.join(data_root, 'panoptic_val2017.json'))
test_old_info = mmcv.load(
osp.join(data_root, 'image_info_test-dev2017.json'))
# replace categories from image_info_test-dev2017.json
# with categories from panoptic_val2017.json which
# has attribute `isthing`.
test_info = test_old_info
test_info.update({'categories': val_info['categories']})
mmcv.dump(test_info,
osp.join(data_root, 'panoptic_image_info_test-dev2017.json'))
if __name__ == '__main__':
main()
|
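A quick sanity check that could be run after either script above, assuming the output file was written into the same annotation directory; the `data_root` path is hypothetical:

import json
import os.path as osp

data_root = 'data/coco/annotations'  # hypothetical path
with open(osp.join(data_root, 'panoptic_image_info_test-dev2017.json')) as f:
    info = json.load(f)
# the replaced categories should now carry the `isthing` attribute
assert all('isthing' in cat for cat in info['categories'])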
import os
import pytest
from docarray import Document
from jina import Executor, Flow, requests
class MyExec(Executor):
@requests
def foo(self, docs, **kwargs):
pass
@pytest.fixture
def cert_prefix():
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/../../../unit/serve/runtimes/gateway/grpc/cert/'
@pytest.fixture
def cert_pem(cert_prefix):
"""This is the cert entry of a self-signed local cert"""
return cert_prefix + '/server.crt'
@pytest.fixture
def key_pem(cert_prefix):
"""This is the key entry of a self-signed local cert"""
return cert_prefix + '/server.key'
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('tls', [True, False])
@pytest.mark.parametrize(
'uses',
['jinaai+sandbox://jina-ai/DummyHubExecutor'],
)
def test_deployment_protocol(protocol, tls, cert_pem, key_pem, uses):
cert = cert_pem if tls else None
key = key_pem if tls else None
f = (
Flow(protocol=protocol, ssl_certfile=cert, ssl_keyfile=key)
.add(uses=MyExec)
.add(uses=uses)
)
with f:
for node, v in f._deployment_nodes.items():
p = v.protocol.lower()
if node == 'gateway':
assert p == protocol + ('s' if tls else '')
elif node == 'executor0':
assert p == 'grpc'
elif node == 'executor1':
assert p == 'grpcs'
|
import os
import pytest
from docarray import Document
from jina import Executor, Flow, requests
class MyExec(Executor):
@requests
def foo(self, docs, **kwargs):
pass
@pytest.fixture
def cert_prefix():
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/../../../unit/serve/runtimes/gateway/grpc/cert/'
@pytest.fixture
def cert_pem(cert_prefix):
"""This is the cert entry of a self-signed local cert"""
return cert_prefix + '/server.crt'
@pytest.fixture
def key_pem(cert_prefix):
"""This is the key entry of a self-signed local cert"""
return cert_prefix + '/server.key'
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('tls', [True, False])
def test_deployment_protocol(protocol, tls, cert_pem, key_pem):
cert = cert_pem if tls else None
key = key_pem if tls else None
f = (
Flow(protocol=protocol, ssl_certfile=cert, ssl_keyfile=key)
.add(uses=MyExec)
.add(uses='jinahub+sandbox://DummyHubExecutor')
)
with f:
for node, v in f._deployment_nodes.items():
p = v.protocol.lower()
if node == 'gateway':
assert p == protocol + ('s' if tls else '')
elif node == 'executor0':
assert p == 'grpc'
elif node == 'executor1':
assert p == 'grpcs'
|
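The assertions in both versions encode one naming rule: the gateway mirrors the Flow protocol and gains an 's' suffix under TLS, while executors speak gRPC (the sandbox executor over TLS, hence 'grpcs'). A standalone sketch of that rule:

def expected_protocol(node: str, gateway_protocol: str, tls: bool) -> str:
    # gateway mirrors the Flow protocol, plus 's' when TLS is enabled;
    # executors always use gRPC, and the sandbox executor is reached over TLS
    if node == 'gateway':
        return gateway_protocol + ('s' if tls else '')
    return 'grpcs' if node == 'executor1' else 'grpc'

assert expected_protocol('gateway', 'http', True) == 'https'
assert expected_protocol('executor0', 'http', True) == 'grpc'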
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal
@_register_explicit_noop(
PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask, warn_passthrough=True
)
def uniform_temporal_subsample(inpt: datapoints._VideoTypeJIT, num_samples: int) -> datapoints._VideoTypeJIT:
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, datapoints.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
|
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_explicit_noop, _register_kernel_internal, is_simple_tensor
@_register_explicit_noop(
PIL.Image.Image, datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask, warn_passthrough=True
)
def uniform_temporal_subsample(inpt: datapoints._VideoTypeJIT, num_samples: int) -> datapoints._VideoTypeJIT:
if not torch.jit.is_scripting():
_log_api_usage_once(uniform_temporal_subsample)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return uniform_temporal_subsample_video(inpt, num_samples)
elif isinstance(inpt, datapoints.Datapoint):
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples)
else:
raise TypeError(
f"Input can either be a plain tensor or any TorchVision datapoint, but got {type(inpt)} instead."
)
@_register_kernel_internal(uniform_temporal_subsample, datapoints.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
|
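A worked example of the kernel's index math, assuming a video tensor laid out as (T, C, H, W):

import torch

video = torch.rand(8, 3, 32, 32)  # T=8 frames
num_samples = 4
# evenly spaced frame indices over [0, T-1]: here 0, 2, 4, 7 after truncation
indices = torch.linspace(0, video.shape[-4] - 1, num_samples).long()
clip = torch.index_select(video, -4, indices)
assert clip.shape == (4, 3, 32, 32)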
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import OBJ_MIMETYPE
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import trimesh
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a file containing 3D mesh or point cloud information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def mime_type(cls) -> str:
return OBJ_MIMETYPE
def _load_trimesh_instance(
self: T,
force: Optional[str] = None,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
            For 'scene' try to coerce everything into a scene.
        :param skip_materials: Skip materials if True, else load them.
        :param trimesh_args: dictionary of additional arguments for `trimesh.load()`
            or `trimesh.load_remote()`.
        :return: trimesh.Trimesh or trimesh.Scene object
"""
import urllib.parse
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
if not trimesh_args:
trimesh_args = {}
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
return mesh
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import OBJ_MIMETYPE
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import trimesh
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a file containing 3D mesh or point cloud information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def mime_type(cls) -> str:
return OBJ_MIMETYPE
def _load_trimesh_instance(
self: T,
force: Optional[str] = None,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
            For 'scene' try to coerce everything into a scene.
        :param skip_materials: Skip materials if True, else load them.
        :param trimesh_args: dictionary of additional arguments for `trimesh.load()`
            or `trimesh.load_remote()`.
        :return: trimesh.Trimesh or trimesh.Scene object
"""
import urllib.parse
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
if not trimesh_args:
trimesh_args = {}
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
return mesh
|
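`Url3D` is abstract; a hedged usage sketch, assuming a concrete subclass such as docarray's `Mesh3DUrl` exists in the installed version and that `trimesh` is available. The URL below is hypothetical:

from pydantic import parse_obj_as
from docarray.typing import Mesh3DUrl  # assumed concrete subclass of Url3D

url = parse_obj_as(Mesh3DUrl, 'https://example.com/model.obj')  # hypothetical URL
# remote schemes dispatch to trimesh.load_remote, local paths to trimesh.load
mesh = url._load_trimesh_instance(force='mesh')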
from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union
from pydantic import BaseModel, validator
from docarray.math.ndarray import to_list
if TYPE_CHECKING:
from docarray.typing import ArrayType
# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions
_ProtoValueType = Optional[Union[bool, float, str, list, dict]]
_StructValueType = Union[
_ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType]
]
_MetadataType = Dict[str, _StructValueType]
def _convert_ndarray_to_list(v: 'ArrayType'):
if v is not None:
return to_list(v)
class _NamedScore(BaseModel):
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
class _MetadataModel(BaseModel):
metadata: _MetadataType
class PydanticDocument(BaseModel):
id: Optional[str]
parent_id: Optional[str]
granularity: Optional[int]
adjacency: Optional[int]
blob: Optional[str]
tensor: Optional[Any]
mime_type: Optional[str]
text: Optional[str]
weight: Optional[float]
uri: Optional[str]
tags: Optional[Dict[str, '_StructValueType']]
_metadata: Optional[Dict[str, '_StructValueType']]
offset: Optional[float]
location: Optional[List[float]]
embedding: Optional[Any]
modality: Optional[str]
evaluations: Optional[Dict[str, '_NamedScore']]
scores: Optional[Dict[str, '_NamedScore']]
chunks: Optional[List['PydanticDocument']]
matches: Optional[List['PydanticDocument']]
_tensor2list = validator('tensor', allow_reuse=True)(_convert_ndarray_to_list)
_embedding2list = validator('embedding', allow_reuse=True)(_convert_ndarray_to_list)
class Config:
smart_union = True
def __init__(self, **data):
super().__init__(**data)
# underscore attributes need to be set and validated manually
_metadata = data.get('_metadata', None)
if _metadata is not None:
_md_model = _MetadataModel(metadata=_metadata) # validate _metadata
object.__setattr__(self, '_metadata', _md_model.metadata)
PydanticDocument.update_forward_refs()
PydanticDocumentArray = List[PydanticDocument]
|
from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union
from pydantic import BaseModel, validator
from ..math.ndarray import to_list
if TYPE_CHECKING:
from ..typing import ArrayType
# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions
_ProtoValueType = Optional[Union[bool, float, str, list, dict]]
_StructValueType = Union[
_ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType]
]
_MetadataType = Dict[str, _StructValueType]
def _convert_ndarray_to_list(v: 'ArrayType'):
if v is not None:
return to_list(v)
class _NamedScore(BaseModel):
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
class _MetadataModel(BaseModel):
metadata: _MetadataType
class PydanticDocument(BaseModel):
id: Optional[str]
parent_id: Optional[str]
granularity: Optional[int]
adjacency: Optional[int]
blob: Optional[str]
tensor: Optional[Any]
mime_type: Optional[str]
text: Optional[str]
weight: Optional[float]
uri: Optional[str]
tags: Optional[Dict[str, '_StructValueType']]
_metadata: Optional[Dict[str, '_StructValueType']]
offset: Optional[float]
location: Optional[List[float]]
embedding: Optional[Any]
modality: Optional[str]
evaluations: Optional[Dict[str, '_NamedScore']]
scores: Optional[Dict[str, '_NamedScore']]
chunks: Optional[List['PydanticDocument']]
matches: Optional[List['PydanticDocument']]
_tensor2list = validator('tensor', allow_reuse=True)(_convert_ndarray_to_list)
_embedding2list = validator('embedding', allow_reuse=True)(_convert_ndarray_to_list)
class Config:
smart_union = True
def __init__(self, **data):
super().__init__(**data)
# underscore attributes need to be set and validated manually
_metadata = data.get('_metadata', None)
if _metadata is not None:
_md_model = _MetadataModel(metadata=_metadata) # validate _metadata
object.__setattr__(self, '_metadata', _md_model.metadata)
PydanticDocument.update_forward_refs()
PydanticDocumentArray = List[PydanticDocument]
|
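A small construction sketch for the model above; the validators convert array-likes to plain lists, so the document serializes cleanly (pydantic v1 API assumed, with `PydanticDocument` defined as in the module):

import numpy as np

doc = PydanticDocument(
    id='d1',
    text='hello world',
    embedding=np.array([0.1, 0.2, 0.3]),  # validator converts this to [0.1, 0.2, 0.3]
    tags={'lang': 'en'},
)
print(doc.json(exclude_none=True))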
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
out_indices=(3, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=2048,
feat_channels=2048,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=2048,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=2048,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms=dict(type='nms', iou_threshold=0.7),
nms_pre=6000,
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
out_indices=(3, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=2048,
feat_channels=2048,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=2048,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=2048,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms=dict(type='nms', iou_threshold=0.7),
nms_pre=6000,
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
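Configs like the two above are plain Python files read by the framework's config loader; a minimal loading sketch with mmengine, where the filename is hypothetical:

from mmengine.config import Config

cfg = Config.fromfile('faster-rcnn_r50-caffe-c4_1x_coco.py')  # hypothetical filename
print(cfg.model.backbone.depth)        # 50
print(cfg.model.rpn_head.in_channels)  # 2048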
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model_reused: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model_reused
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
def test_nanobeir_evaluator():
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
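For context, the evaluator under test can also be run standalone; a sketch, assuming network access to download the NanoBEIR datasets:

from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator

model = SentenceTransformer('sentence-transformers-testing/stsb-bert-tiny-safetensors')
evaluator = NanoBEIREvaluator(dataset_names=['MSMARCO'])
results = evaluator(model)  # dict of metric name -> float, e.g. NDCG@10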
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import Embedding, ImageUrl, Tensor
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), a Tensor (`Image.tensor`),
and an Embedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = MyEmbeddingModel(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray import Image
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[Embedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = MyEmbeddingModel(image.tensor)
image.second_embedding = MyEmbeddingModel(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import Document, Image, Text
# compose it
class MultiModalDoc(Document):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
"""
url: Optional[ImageUrl]
tensor: Optional[Tensor]
embedding: Optional[Embedding]
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import Embedding, ImageUrl, Tensor
class Image(BaseDocument):
"""
    Base Document for image handling.
"""
uri: Optional[ImageUrl]
tensor: Optional[Tensor]
embedding: Optional[Embedding]
|
from typing import (
TYPE_CHECKING,
Sequence,
)
import numpy as np
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import (
DocumentArrayIndexType,
)
class DelItemMixin:
"""Provide help function to enable advanced indexing in `__delitem__`"""
def __delitem__(self, index: 'DocumentArrayIndexType'):
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
self._del_doc_by_offset(int(index))
elif isinstance(index, str):
if index.startswith('@'):
raise NotImplementedError(
                    'Deleting elements along traversal paths is not implemented'
)
else:
self._del_doc(index)
elif isinstance(index, slice):
self._del_docs_by_slice(index)
elif index is Ellipsis:
self._del_all_docs()
elif isinstance(index, Sequence):
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
del self[index[0]]
del self[index[1]]
else:
self._set_doc_attr_by_id(index[0], index[1], None)
elif isinstance(index[0], (slice, Sequence)):
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
for _d in self[index[0]]:
for _aa in _attrs:
self._set_doc_attr_by_id(_d.id, _aa, None)
_d.pop(_aa)
elif isinstance(index[0], bool):
self._del_docs_by_mask(index)
elif isinstance(index[0], int):
for t in sorted(index, reverse=True):
del self[t]
elif isinstance(index[0], str):
for t in index:
del self[t]
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
del self[index.tolist()]
else:
raise IndexError(
                    f'When using np.ndarray as index, its `ndim` must be 1. However, received ndim={index.ndim}'
)
else:
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
|
from typing import (
TYPE_CHECKING,
Sequence,
)
import numpy as np
from ...helper import typename
if TYPE_CHECKING:
from ...typing import (
DocumentArrayIndexType,
)
class DelItemMixin:
"""Provide help function to enable advanced indexing in `__delitem__`"""
def __delitem__(self, index: 'DocumentArrayIndexType'):
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
self._del_doc_by_offset(int(index))
elif isinstance(index, str):
if index.startswith('@'):
raise NotImplementedError(
                    'Deleting elements along traversal paths is not implemented'
)
else:
self._del_doc(index)
elif isinstance(index, slice):
self._del_docs_by_slice(index)
elif index is Ellipsis:
self._del_all_docs()
elif isinstance(index, Sequence):
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
del self[index[0]]
del self[index[1]]
else:
self._set_doc_attr_by_id(index[0], index[1], None)
elif isinstance(index[0], (slice, Sequence)):
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
for _d in self[index[0]]:
for _aa in _attrs:
self._set_doc_attr_by_id(_d.id, _aa, None)
_d.pop(_aa)
elif isinstance(index[0], bool):
self._del_docs_by_mask(index)
elif isinstance(index[0], int):
for t in sorted(index, reverse=True):
del self[t]
elif isinstance(index[0], str):
for t in index:
del self[t]
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
del self[index.tolist()]
else:
raise IndexError(
                    f'When using np.ndarray as index, its `ndim` must be 1. However, received ndim={index.ndim}'
)
else:
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
|
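The branches above correspond to the following user-facing deletion forms; a sketch assuming the docarray v1 `DocumentArray` API:

from docarray import Document, DocumentArray

da = DocumentArray([Document(text=str(i)) for i in range(5)])
del da[0]            # int: delete by offset
del da[da[0].id]     # str: delete by document id
del da[1:3]          # slice: delete a range
del da[...]          # Ellipsis: delete everything
assert len(da) == 0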
from enum import Enum
from typing import TYPE_CHECKING, Union, overload
import numpy as np
if TYPE_CHECKING:
import torch
class Pooling(str, Enum):
"""Enum of possible pooling choices with pooling behaviors."""
CLS = "cls"
MEAN = "mean"
LAST = "last" # last token pooling
def __call__(self, array: np.ndarray) -> np.ndarray:
if self == self.CLS:
return self.cls_pooling(array)
elif self == self.LAST:
return self.last_pooling(array)
return self.mean_pooling(array)
@classmethod
@overload
def cls_pooling(cls, array: np.ndarray) -> np.ndarray: ...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def cls_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def cls_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, 0]
if len(array.shape) == 2:
return array[0]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
def mean_pooling(cls, array: np.ndarray) -> np.ndarray:
if len(array.shape) == 3:
return array.mean(axis=1)
if len(array.shape) == 2:
return array.mean(axis=0)
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
@overload
def last_pooling(cls, array: np.ndarray) -> np.ndarray: ...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def last_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def last_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, -1]
if len(array.shape) == 2:
return array[-1]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
|
from enum import Enum
from typing import TYPE_CHECKING, Union, overload
import numpy as np
if TYPE_CHECKING:
import torch
class Pooling(str, Enum):
"""Enum of possible pooling choices with pooling behaviors."""
CLS = "cls"
MEAN = "mean"
LAST = "last" # last token pooling
def __call__(self, array: np.ndarray) -> np.ndarray:
if self == self.CLS:
return self.cls_pooling(array)
elif self == self.LAST:
return self.last_pooling(array)
return self.mean_pooling(array)
@classmethod
@overload
def cls_pooling(cls, array: np.ndarray) -> np.ndarray:
...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def cls_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def cls_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, 0]
if len(array.shape) == 2:
return array[0]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
def mean_pooling(cls, array: np.ndarray) -> np.ndarray:
if len(array.shape) == 3:
return array.mean(axis=1)
if len(array.shape) == 2:
return array.mean(axis=0)
raise NotImplementedError(f"Unhandled shape {array.shape}.")
@classmethod
@overload
def last_pooling(cls, array: np.ndarray) -> np.ndarray:
...
@classmethod
@overload
# TODO: Remove this `type: ignore` after the false positive problem
# is addressed in mypy: https://github.com/python/mypy/issues/15683 .
def last_pooling(cls, array: "torch.Tensor") -> "torch.Tensor": # type: ignore
...
@classmethod
def last_pooling(
cls, array: "Union[np.ndarray, torch.Tensor]"
) -> "Union[np.ndarray, torch.Tensor]":
if len(array.shape) == 3:
return array[:, -1]
if len(array.shape) == 2:
return array[-1]
raise NotImplementedError(f"Unhandled shape {array.shape}.")
|
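A worked example of the dispatch in `__call__`, assuming token embeddings shaped (batch, seq_len, hidden) and the `Pooling` enum defined as above:

import numpy as np

token_embeddings = np.random.rand(2, 5, 8)             # (batch, seq_len, hidden)
assert Pooling.MEAN(token_embeddings).shape == (2, 8)  # mean over the seq axis
assert Pooling.CLS(token_embeddings).shape == (2, 8)   # first token per sequence
assert Pooling.LAST(token_embeddings).shape == (2, 8)  # last token per sequence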
_base_ = './yolox_s_8xb8-300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # width, height
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
# img_scale is (width, height)
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolox_s_8xb8-300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|
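A hedged sketch of how the hook is typically enabled, assuming the usual mmengine-style `custom_hooks` entry in a training config:

# in a training config file
custom_hooks = [
    dict(type='EmptyCacheHook', before_epoch=False, after_epoch=True, after_iter=False),
]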
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.datapoints._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L:
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def wrap_like(cls: Type[L], other: L, tensor: torch.Tensor, *, categories: Optional[Sequence[str]] = None) -> L:
return cls._wrap(
tensor,
categories=categories if categories is not None else other.categories,
)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist())
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
if categories is not None and len(categories) != one_hot_label.shape[-1]:
raise ValueError()
return one_hot_label
|
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from ._datapoint import Datapoint
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(Datapoint):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L:
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def wrap_like(cls: Type[L], other: L, tensor: torch.Tensor, *, categories: Optional[Sequence[str]] = None) -> L:
return cls._wrap(
tensor,
categories=categories if categories is not None else other.categories,
)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist())
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
if categories is not None and len(categories) != one_hot_label.shape[-1]:
raise ValueError()
return one_hot_label
|
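A small usage sketch of the classes above, exercising `from_category` and the one-hot shape check (with `Label` and `OneHotLabel` in scope as defined in the module):

import torch

label = Label.from_category('cat', categories=['dog', 'cat'])
assert int(label) == 1                   # index of 'cat' in the category list
assert label.to_categories() == 'cat'

one_hot = OneHotLabel(torch.tensor([0, 1]), categories=['dog', 'cat'])  # OK
# OneHotLabel(torch.tensor([0, 1, 0]), categories=['dog', 'cat'])  # would raise ValueError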