input | output |
---|---|
import itertools
import os.path
import pytest
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
list(protocols)
for protocols in itertools.chain(
itertools.combinations(PROTOCOLS, r=3),
itertools.combinations(PROTOCOLS, r=2),
)
],
)
def test_flow_multiprotocol_default_random_ports(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(flow.port, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
['grpc'],
['http'],
['websocket'],
],
)
def test_flow_single_protocol_default_random_port(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for protocol in protocols:
client = Client(port=flow.port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
|
import itertools
import os.path
import pytest
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
|
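The tests above exercise Jina's multi-protocol gateway end to end. A minimal standalone sketch of the same pattern, assuming a local jina/docarray install; the two fixed port numbers are illustrative, any free ports work:
from docarray import Document
from jina import Client, Flow
flow = Flow().config_gateway(port=[12345, 12346], protocol=['grpc', 'http'])
with flow:
    # The same gateway serves each protocol on its own port.
    for port, protocol in zip([12345, 12346], ['grpc', 'http']):
        Client(port=port, protocol=protocol).post('/', inputs=[Document()])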
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 59.18%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 86.92%
Precision@1: 59.18%
Recall@1: 35.62%
Precision@3: 36.26%
Recall@3: 50.85%
Precision@5: 27.75%
Recall@5: 56.57%
Precision@10: 19.24%
Recall@10: 64.31%
MRR@10: 0.6848
NDCG@10: 0.6218
Model Query Sparsity: Active Dimensions: 72.7, Sparsity Ratio: 0.9976
Model Corpus Sparsity: Active Dimensions: 165.9, Sparsity Ratio: 0.9946
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6218
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Query Sparsity: Active Dimensions: 74.9, Sparsity Ratio: 0.9975
Model Corpus Sparsity: Active Dimensions: 174.8, Sparsity Ratio: 0.9943
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
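Passing dataset_names=None above evaluates every NanoBEIR dataset. A sketch of restricting the run to a subset, assuming the lowercase NanoBEIR dataset names ("msmarco", "nq") used by sentence-transformers:
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Two datasets instead of the full suite; the results dict keeps the same key layout.
evaluator = SparseNanoBEIREvaluator(dataset_names=["msmarco", "nq"], batch_size=16)
results = evaluator(model)
print(results[evaluator.primary_metric])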
__version__ = '0.40.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.39.2'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
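Both versions hinge on PEP 562: a module-level __getattr__ is called for any attribute lookup that misses the module namespace, which is how the removed Document/DocumentArray names get a tailored ImportError. A generic sketch of the mechanism; the module and attribute names are hypothetical:
# mymodule.py
_REMOVED = {'OldThing'}
def __getattr__(name: str):
    # Only reached when `name` is not a real attribute of this module.
    if name in _REMOVED:
        raise ImportError(f"'{name}' was removed; pin an older release to keep using it.")
    raise AttributeError(f"module 'mymodule' has no attribute {name!r}")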
_base_ = './sparse-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 36
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = './sparse_rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 36
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
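Both configs inherit from a base file via _base_, and {{_base_.file_client_args}} splices a value out of that base at parse time. A sketch of loading such a config with mmengine; the file path here is an assumption:
from mmengine.config import Config
cfg = Config.fromfile('configs/sparse_rcnn/sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py')
print(cfg.max_epochs)   # 36, set in this file
print(cfg.model.type)   # resolved from the _base_ chain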
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from docarray.utils._internal.pydantic import is_pydantic_v2
if TYPE_CHECKING:
if is_pydantic_v2:
from pydantic import GetCoreSchemaHandler
from pydantic_core import core_schema
from docarray.base_doc.base_node import BaseNode
T = TypeVar('T')
class AbstractType(BaseNode):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def _docarray_validate(cls: Type[T], value: Any) -> T:
...
if is_pydantic_v2:
@classmethod
def validate(cls: Type[T], value: Any, _: Any) -> T:
return cls._docarray_validate(value)
else:
@classmethod
def validate(
cls: Type[T],
value: Any,
) -> T:
return cls._docarray_validate(value)
if is_pydantic_v2:
@classmethod
@abstractmethod
def __get_pydantic_core_schema__(
cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'
) -> 'core_schema.CoreSchema':
...
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from docarray.utils._internal.pydantic import is_pydantic_v2
if TYPE_CHECKING:
if is_pydantic_v2:
from pydantic import GetCoreSchemaHandler
from pydantic_core import core_schema
from docarray.base_doc.base_node import BaseNode
T = TypeVar('T')
class AbstractType(BaseNode):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def _docarray_validate(cls: Type[T], value: Any) -> T:
...
if is_pydantic_v2:
@classmethod
def validate(cls: Type[T], value: Any, _: Any) -> T:
return cls._docarray_validate(value)
else:
@classmethod
def validate(
cls: Type[T],
value: Any,
) -> T:
return cls._docarray_validate(value)
if is_pydantic_v2:
@classmethod
@abstractmethod
def __get_pydantic_core_schema__(
cls, _source_type: Any, _handler: 'GetCoreSchemaHandler'
) -> 'core_schema.CoreSchema':
...
|
from typing import Any, Dict
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import tv_tensors as proto_tv_tensors
from torchvision.transforms.v2 import Transform
class LabelToOneHot(Transform):
_transformed_types = (proto_tv_tensors.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def transform(self, inpt: proto_tv_tensors.Label, params: Dict[str, Any]) -> proto_tv_tensors.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt.as_subclass(torch.Tensor), num_classes=num_categories)
return proto_tv_tensors.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
|
from typing import Any, Dict
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import tv_tensors as proto_tv_tensors
from torchvision.transforms.v2 import Transform
class LabelToOneHot(Transform):
_transformed_types = (proto_tv_tensors.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def _transform(self, inpt: proto_tv_tensors.Label, params: Dict[str, Any]) -> proto_tv_tensors.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt.as_subclass(torch.Tensor), num_classes=num_categories)
return proto_tv_tensors.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
|
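Both variants delegate the actual encoding to torch.nn.functional.one_hot; a standalone sketch of that call:
import torch
from torch.nn.functional import one_hot
labels = torch.tensor([0, 2, 1])
print(one_hot(labels, num_classes=3))
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]])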
from __future__ import annotations
import importlib.metadata
import importlib.util
import operator as op
from typing import Union
from packaging import version
STR_OPERATION_TO_FUNC = {
">": op.gt,
">=": op.ge,
"==": op.eq,
"!=": op.ne,
"<=": op.le,
"<": op.lt,
}
_optimum_available = importlib.util.find_spec("optimum") is not None
_optimum_version = "N/A"
if _optimum_available:
try:
_optimum_version = importlib.metadata.version("optimum")
except importlib.metadata.PackageNotFoundError:
_optimum_available = False
_optimum_intel_available = (
_optimum_available and importlib.util.find_spec("optimum.intel") is not None
)
_optimum_intel_version = "N/A"
if _optimum_intel_available:
try:
_optimum_intel_version = importlib.metadata.version("optimum-intel")
except importlib.metadata.PackageNotFoundError:
_optimum_intel_available = False
_ipex_available = importlib.util.find_spec("intel_extension_for_pytorch") is not None
_openvino_available = importlib.util.find_spec("openvino") is not None
# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319
def compare_versions(
library_or_version: Union[str, version.Version],
operation: str,
requirement_version: str,
) -> bool:
"""Compare a library version to some requirement using a given operation.
Arguments:
library_or_version (`str` or `packaging.version.Version`):
A library name or a version to check.
operation (`str`):
A string representation of an operator, such as `">"` or `"<="`.
requirement_version (`str`):
The version to compare the library version against.
"""
if operation not in STR_OPERATION_TO_FUNC:
msg = (
f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}"
f", received {operation}"
)
raise ValueError(msg)
if isinstance(library_or_version, str):
library_or_version = version.parse(
importlib.metadata.version(library_or_version)
)
return STR_OPERATION_TO_FUNC[operation](
library_or_version, version.parse(requirement_version)
)
def is_optimum_available() -> bool:
return _optimum_available
def is_optimum_intel_available() -> bool:
return _optimum_intel_available
def is_ipex_available() -> bool:
return _ipex_available
def is_openvino_available() -> bool:
return _openvino_available
def is_optimum_version(operation: str, reference_version: str) -> bool:
"""Compare the current Optimum version to a given reference with an operation."""
if not _optimum_version:
return False
return compare_versions(
version.parse(_optimum_version), operation, reference_version
)
def is_optimum_intel_version(operation: str, reference_version: str) -> bool:
"""Compare current Optimum Intel version to a given reference with an operation."""
if not _optimum_intel_version:
return False
return compare_versions(
version.parse(_optimum_intel_version), operation, reference_version
)
IMPORT_ERROR = """
requires the {0} library but it was not found in your environment.
You can install it with pip: `pip install {0}`.
Please note that you may need to restart your runtime after installation.
"""
|
import importlib.metadata
import importlib.util
import operator as op
from typing import Union
from packaging import version
STR_OPERATION_TO_FUNC = {
">": op.gt,
">=": op.ge,
"==": op.eq,
"!=": op.ne,
"<=": op.le,
"<": op.lt,
}
_optimum_available = importlib.util.find_spec("optimum") is not None
_optimum_version = "N/A"
if _optimum_available:
try:
_optimum_version = importlib.metadata.version("optimum")
except importlib.metadata.PackageNotFoundError:
_optimum_available = False
_optimum_intel_available = (
_optimum_available and importlib.util.find_spec("optimum.intel") is not None
)
_optimum_intel_version = "N/A"
if _optimum_intel_available:
try:
_optimum_intel_version = importlib.metadata.version("optimum-intel")
except importlib.metadata.PackageNotFoundError:
_optimum_intel_available = False
_ipex_available = importlib.util.find_spec("intel_extension_for_pytorch") is not None
_openvino_available = importlib.util.find_spec("openvino") is not None
# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319
def compare_versions(
library_or_version: Union[str, version.Version],
operation: str,
requirement_version: str,
) -> bool:
"""
Compare a library version to some requirement using a given operation.
Arguments:
library_or_version (`str` or `packaging.version.Version`):
A library name or a version to check.
operation (`str`):
A string representation of an operator, such as `">"` or `"<="`.
requirement_version (`str`):
The version to compare the library version against.
"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(
f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}"
f", received {operation}"
)
if isinstance(library_or_version, str):
library_or_version = version.parse(
importlib.metadata.version(library_or_version)
)
return STR_OPERATION_TO_FUNC[operation](
library_or_version, version.parse(requirement_version)
)
def is_optimum_available() -> bool:
return _optimum_available
def is_optimum_intel_available() -> bool:
return _optimum_intel_available
def is_ipex_available() -> bool:
return _ipex_available
def is_openvino_available() -> bool:
return _openvino_available
def is_optimum_version(operation: str, reference_version: str) -> bool:
"""
Compare the current Optimum version to a given reference with an operation.
"""
if not _optimum_version:
return False
return compare_versions(
version.parse(_optimum_version), operation, reference_version
)
def is_optimum_intel_version(operation: str, reference_version: str) -> bool:
"""
Compare the current Optimum Intel version to a given reference with an operation.
"""
if not _optimum_intel_version:
return False
return compare_versions(
version.parse(_optimum_intel_version), operation, reference_version
)
IMPORT_ERROR = """
requires the {0} library but it was not found in your environment.
You can install it with pip: `pip install {0}`.
Please note that you may need to restart your runtime after installation.
"""
|
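compare_versions reduces to mapping an operator string onto the operator module and comparing packaging versions; a self-contained sketch of that core, with hypothetical helper names:
import operator
from packaging import version
OPS = {">": operator.gt, ">=": operator.ge, "<": operator.lt, "<=": operator.le}
def version_ok(current: str, op: str, required: str) -> bool:
    # Mirrors STR_OPERATION_TO_FUNC above, trimmed to four operators.
    return OPS[op](version.parse(current), version.parse(required))
print(version_ok("1.14.0", ">=", "1.13.0"))  # True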
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
|
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
_base_ = './retinanet_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
_base_ = 'retinanet_r50_fpn_1x_coco.py'
# training schedule for 90k
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=90000, val_interval=10000)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(interval=10000))
|
_base_ = 'retinanet_r50_fpn_1x_coco.py'
# training schedule for 90k
train_cfg = dict(by_epoch=False, max_iters=90000)
val_cfg = dict(interval=10000)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(interval=10000))
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import pickle_obj, unpickle_obj
SERIALIZERS = ["pickle", "joblib", "cloudpickle"]
def pickle_and_unpickle_object(obj, serializer):
with lgb.basic._TempFile() as tmp_file:
pickle_obj(
obj=obj,
filepath=tmp_file.name,
serializer=serializer
)
obj_from_disk = unpickle_obj(
filepath=tmp_file.name,
serializer=serializer
)
return obj_from_disk
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 30
assert callback_from_disk.before_iteration is False
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_log_evaluation_callback_is_picklable(serializer):
periods = 42
callback = lgb.log_evaluation(period=periods)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is False
assert callback.period == callback_from_disk.period
assert callback.period == periods
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_record_evaluation_callback_is_picklable(serializer):
results = {}
callback = lgb.record_evaluation(eval_result=results)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 20
assert callback_from_disk.before_iteration is False
assert callback.eval_result == callback_from_disk.eval_result
assert callback.eval_result is results
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import pickle_obj, unpickle_obj
@pytest.mark.parametrize('serializer', ["pickle", "joblib", "cloudpickle"])
def test_early_stopping_callback_is_picklable(serializer, tmp_path):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
tmp_file = tmp_path / "early_stopping.pkl"
pickle_obj(
obj=callback,
filepath=tmp_file,
serializer=serializer
)
callback_from_disk = unpickle_obj(
filepath=tmp_file,
serializer=serializer
)
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
@pytest.mark.parametrize('serializer', ["pickle", "joblib", "cloudpickle"])
def test_log_evaluation_callback_is_picklable(serializer, tmp_path):
periods = 42
callback = lgb.log_evaluation(period=periods)
tmp_file = tmp_path / "log_evaluation.pkl"
pickle_obj(
obj=callback,
filepath=tmp_file,
serializer=serializer
)
callback_from_disk = unpickle_obj(
filepath=tmp_file,
serializer=serializer
)
assert callback.period == callback_from_disk.period
assert callback.period == periods
|
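Both test versions boil down to the same serialize/deserialize round-trip; a stdlib-only sketch of what pickle_obj/unpickle_obj do for the plain pickle serializer (roundtrip is a hypothetical name):
import pickle
import tempfile
def roundtrip(obj):
    with tempfile.NamedTemporaryFile(suffix=".pkl", delete=False) as f:
        pickle.dump(obj, f)
        path = f.name
    with open(path, "rb") as f:
        return pickle.load(f)
assert roundtrip({"a": 1}) == {"a": 1}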
from urllib.parse import parse_qs, urlparse
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import TextFormatter
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class TranscribeYoutubeVideoBlock(Block):
class Input(BlockSchema):
youtube_url: str = SchemaField(
title="YouTube URL",
description="The URL of the YouTube video to transcribe",
placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
)
class Output(BlockSchema):
video_id: str = SchemaField(description="The extracted YouTube video ID")
transcript: str = SchemaField(description="The transcribed text of the video")
error: str = SchemaField(
description="Any error message if the transcription fails"
)
def __init__(self):
super().__init__(
id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c",
input_schema=TranscribeYoutubeVideoBlock.Input,
output_schema=TranscribeYoutubeVideoBlock.Output,
description="Transcribes a YouTube video.",
categories={BlockCategory.SOCIAL},
test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"},
test_output=[
("video_id", "dQw4w9WgXcQ"),
(
"transcript",
"Never gonna give you up\nNever gonna let you down",
),
],
test_mock={
"get_transcript": lambda video_id: [
{"text": "Never gonna give you up"},
{"text": "Never gonna let you down"},
],
},
)
@staticmethod
def extract_video_id(url: str) -> str:
parsed_url = urlparse(url)
if parsed_url.netloc == "youtu.be":
return parsed_url.path[1:]
if parsed_url.netloc in ("www.youtube.com", "youtube.com"):
if parsed_url.path == "/watch":
p = parse_qs(parsed_url.query)
return p["v"][0]
if parsed_url.path[:7] == "/embed/":
return parsed_url.path.split("/")[2]
if parsed_url.path[:3] == "/v/":
return parsed_url.path.split("/")[2]
raise ValueError(f"Invalid YouTube URL: {url}")
@staticmethod
def get_transcript(video_id: str):
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
if not transcript_list:
raise ValueError(f"No transcripts found for the video: {video_id}")
for transcript in transcript_list:
first_transcript = transcript_list.find_transcript(
[transcript.language_code]
)
return YouTubeTranscriptApi.get_transcript(
video_id, languages=[first_transcript.language_code]
)
except Exception:
raise ValueError(f"No transcripts found for the video: {video_id}")
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
video_id = self.extract_video_id(input_data.youtube_url)
yield "video_id", video_id
transcript = self.get_transcript(video_id)
formatter = TextFormatter()
transcript_text = formatter.format_transcript(transcript)
yield "transcript", transcript_text
|
from urllib.parse import parse_qs, urlparse
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import TextFormatter
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class TranscribeYoutubeVideoBlock(Block):
class Input(BlockSchema):
youtube_url: str = SchemaField(
title="YouTube URL",
description="The URL of the YouTube video to transcribe",
placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
)
class Output(BlockSchema):
video_id: str = SchemaField(description="The extracted YouTube video ID")
transcript: str = SchemaField(description="The transcribed text of the video")
error: str = SchemaField(
description="Any error message if the transcription fails"
)
def __init__(self):
super().__init__(
id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c",
input_schema=TranscribeYoutubeVideoBlock.Input,
output_schema=TranscribeYoutubeVideoBlock.Output,
description="Transcribes a YouTube video.",
categories={BlockCategory.SOCIAL},
test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"},
test_output=[
("video_id", "dQw4w9WgXcQ"),
(
"transcript",
"Never gonna give you up\nNever gonna let you down",
),
],
test_mock={
"get_transcript": lambda video_id: [
{"text": "Never gonna give you up"},
{"text": "Never gonna let you down"},
],
},
)
@staticmethod
def extract_video_id(url: str) -> str:
parsed_url = urlparse(url)
if parsed_url.netloc == "youtu.be":
return parsed_url.path[1:]
if parsed_url.netloc in ("www.youtube.com", "youtube.com"):
if parsed_url.path == "/watch":
p = parse_qs(parsed_url.query)
return p["v"][0]
if parsed_url.path[:7] == "/embed/":
return parsed_url.path.split("/")[2]
if parsed_url.path[:3] == "/v/":
return parsed_url.path.split("/")[2]
raise ValueError(f"Invalid YouTube URL: {url}")
@staticmethod
def get_transcript(video_id: str):
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(video_id)
if not transcript_list:
raise ValueError(f"No transcripts found for the video: {video_id}")
for transcript in transcript_list:
first_transcript = transcript_list.find_transcript(
[transcript.language_code]
)
return YouTubeTranscriptApi.get_transcript(
video_id, languages=[first_transcript.language_code]
)
except Exception:
raise ValueError(f"No transcripts found for the video: {video_id}")
def run(self, input_data: Input, **kwargs) -> BlockOutput:
video_id = self.extract_video_id(input_data.youtube_url)
yield "video_id", video_id
transcript = self.get_transcript(video_id)
formatter = TextFormatter()
transcript_text = formatter.format_transcript(transcript)
yield "transcript", transcript_text
|
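extract_video_id above is pure urllib parsing; a standalone sketch of the two URL shapes it handles most often:
from urllib.parse import parse_qs, urlparse
for url in ("https://www.youtube.com/watch?v=dQw4w9WgXcQ", "https://youtu.be/dQw4w9WgXcQ"):
    parsed = urlparse(url)
    # youtu.be keeps the ID in the path; youtube.com/watch keeps it in ?v=
    video_id = parsed.path[1:] if parsed.netloc == "youtu.be" else parse_qs(parsed.query)["v"][0]
    print(video_id)  # dQw4w9WgXcQ both times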
prompt_template = """Given the following question and context, return YES if the context is relevant to the question and NO if it isn't.
> Question: {question}
> Context:
>>>
{context}
>>>
> Relevant (YES / NO):""" # noqa: E501
|
# flake8: noqa
prompt_template = """Given the following question and context, return YES if the context is relevant to the question and NO if it isn't.
> Question: {question}
> Context:
>>>
{context}
>>>
> Relevant (YES / NO):"""
|
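The {question} and {context} placeholders are ordinary str.format fields; a tiny sketch of filling a template of this shape (the example strings are made up):
template = "> Question: {question}\n> Context: {context}\n> Relevant (YES / NO):"
print(template.format(question="Is water wet?", context="Water covers most of Earth."))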
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocVec
from docarray.typing import ImageUrl, NdArray
def test_optional():
class Features(BaseDoc):
tensor: NdArray[100]
class Image(BaseDoc):
url: ImageUrl
features: Optional[Features] = None
docs = DocVec[Image]([Image(url='http://url.com/foo.png') for _ in range(10)])
print(docs.features) # None
docs.features = [Features(tensor=np.random.random([100])) for _ in range(10)]
print(docs.features) # <DocVec[Features] (length=10)>
assert isinstance(docs.features, DocVec[Features])
docs.features.tensor = np.ones((10, 100))
assert docs[0].features.tensor.shape == (100,)
docs.features = None
assert docs[0].features is None
|
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocVec
from docarray.typing import ImageUrl, NdArray
def test_optional():
class Features(BaseDoc):
tensor: NdArray[100]
class Image(BaseDoc):
url: ImageUrl
features: Optional[Features]
docs = DocVec[Image]([Image(url='http://url.com/foo.png') for _ in range(10)])
print(docs.features) # None
docs.features = [Features(tensor=np.random.random([100])) for _ in range(10)]
print(docs.features) # <DocVec[Features] (length=10)>
assert isinstance(docs.features, DocVec[Features])
docs.features.tensor = np.ones((10, 100))
assert docs[0].features.tensor.shape == (100,)
docs.features = None
assert docs[0].features is None
|
from ._dsp import (
adsr_envelope,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from ._rir import simulate_rir_ism
from .functional import barkscale_fbanks
__all__ = [
"adsr_envelope",
"barkscale_fbanks",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"sinc_impulse_response",
"simulate_rir_ism",
]
|
from ._dsp import (
adsr_envelope,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from .functional import barkscale_fbanks
__all__ = [
"adsr_envelope",
"barkscale_fbanks",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"sinc_impulse_response",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results to
the original image. Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
# TODO: nms_op in mmcv needs to be enhanced; the bbox result may
# differ when rescaling is skipped in bbox_head
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x,
batch_img_metas,
rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x, batch_img_metas, results_list, rescale=rescale)
return results_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList, **kwargs):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool): Whether to rescale the results to
the original image. Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
# TODO: nms_op in mmcv needs to be enhanced; the bbox result may
# differ when rescaling is skipped in bbox_head
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x,
batch_img_metas,
rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x, batch_img_metas, results_list, rescale=rescale)
return results_list
|
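BaseRoIHead follows the standard ABC pattern: abstract hooks that every concrete RoI head must implement before the class can be instantiated. A minimal sketch with hypothetical class names:
from abc import ABCMeta, abstractmethod
class HeadBase(metaclass=ABCMeta):
    @abstractmethod
    def init_bbox_head(self):
        """Hook each concrete head must provide."""
class ConcreteHead(HeadBase):
    def init_bbox_head(self):
        return 'built'
print(ConcreteHead().init_bbox_head())  # 'built'; HeadBase() itself raises TypeError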
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.dist import get_world_size
from mmengine.logging import print_log
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RTMDet(SingleStageDetector):
"""Implementation of RTMDet.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of RTMDet. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of RTMDet. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
use_syncbn (bool): Whether to use SyncBatchNorm. Defaults to True.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
use_syncbn: bool = True) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
# TODO: Waiting for mmengine support
if use_syncbn and get_world_size() > 1:
torch.nn.SyncBatchNorm.convert_sync_batchnorm(self)
print_log('Using SyncBatchNorm()', 'current')
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.dist import get_world_size
from mmengine.logging import print_log
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RTMDet(SingleStageDetector):
"""Implementation of RTMDet.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of RTMDet. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of RTMDet. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
use_syncbn (bool): Whether to use SyncBatchNorm. Defaults to True.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
use_syncbn: bool = True) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
# TODO: Waiting for mmengine support
if use_syncbn and get_world_size() > 1:
torch.nn.SyncBatchNorm.convert_sync_batchnorm(self)
print_log('Using SyncBatchNorm()', 'current')
|
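The SyncBatchNorm conversion RTMDet applies under distributed training can be reproduced in isolation; the conversion itself does not need an initialized process group:
import torch
model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
print(type(model[1]).__name__)  # SyncBatchNorm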
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone: Union[ConfigDict, dict],
rpn_head: Union[ConfigDict, dict],
roi_head: Union[ConfigDict, dict],
train_cfg: Union[ConfigDict, dict],
test_cfg: Union[ConfigDict, dict],
neck: Optional[Union[ConfigDict, dict]] = None,
pretrained: Optional[str] = None,
preprocess_cfg: Optional[Union[ConfigDict, dict]] = None,
init_cfg: Optional[Union[ConfigDict, dict]] = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(MaskRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
|
from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.api_export import keras_export
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import Variable
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
from keras.src.backend.tensorflow.core import Variable as BackendVariable
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
from keras.src.backend.jax.core import Variable as BackendVariable
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
from keras.src.backend.torch.core import Variable as BackendVariable
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
from keras.src.backend.numpy.core import Variable as BackendVariable
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
@keras_export("keras.Variable")
class Variable(BackendVariable): # noqa: F811
pass
backend_name_scope = name_scope # noqa: F405
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return device_scope(device_name) # noqa: F405
|
from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.api_export import keras_export
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
BackendVariable = Variable # noqa: F405
@keras_export("keras.Variable")
class Variable(BackendVariable):
pass
backend_name_scope = name_scope # noqa: F405
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return device_scope(device_name) # noqa: F405
|
from __future__ import annotations
from typing import Any
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Mask(TVTensor):
""":class:`torch.Tensor` subclass for segmentation and detection masks with shape ``[..., H, W]``.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: torch.dtype | None = None,
device: torch.device | str | int | None = None,
requires_grad: bool | None = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Mask(TVTensor):
""":class:`torch.Tensor` subclass for segmentation and detection masks with shape ``[..., H, W]``.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
|
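Constructing a Mask from raw tensor data goes through the same __new__ shown above; a sketch assuming torchvision >= 0.16, where tv_tensors is public:
import torch
from torchvision import tv_tensors
mask = tv_tensors.Mask(torch.zeros(1, 4, 4, dtype=torch.uint8))
print(type(mask).__name__, tuple(mask.shape))  # Mask (1, 4, 4)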
"""Test Aleph Alpha specific stuff."""
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.aleph_alpha import AlephAlpha
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_is_secret_string() -> None:
llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore[arg-type]
assert isinstance(llm.aleph_alpha_api_key, SecretStr)
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore[arg-type]
print(llm.aleph_alpha_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
monkeypatch.setenv("ALEPH_ALPHA_API_KEY", "secret-api-key")
llm = AlephAlpha()
print(llm.aleph_alpha_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
|
"""Test Aleph Alpha specific stuff."""
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture, MonkeyPatch
from langchain_community.llms.aleph_alpha import AlephAlpha
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_is_secret_string() -> None:
llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore[arg-type]
assert isinstance(llm.aleph_alpha_api_key, SecretStr)
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") # type: ignore[arg-type]
print(llm.aleph_alpha_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
@pytest.mark.requires("aleph_alpha_client")
def test_api_key_masked_when_passed_from_env(
monkeypatch: MonkeyPatch, capsys: CaptureFixture
) -> None:
monkeypatch.setenv("ALEPH_ALPHA_API_KEY", "secret-api-key")
llm = AlephAlpha() # type: ignore[call-arg]
print(llm.aleph_alpha_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
|
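The masking asserted in the tests above is pydantic behavior rather than anything Aleph-Alpha-specific; a standalone sketch using only pydantic:

from pydantic import SecretStr

secret = SecretStr("secret-api-key")
print(secret)  # prints ********** -- str() and repr() are masked
assert str(secret) == "**********"
assert secret.get_secret_value() == "secret-api-key"  # explicit unmasking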
from __future__ import annotations
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .NanoBEIREvaluator import NanoBEIREvaluator
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .RerankingEvaluator import RerankingEvaluator
from .SentenceEvaluator import SentenceEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .SimilarityFunction import SimilarityFunction
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
"NanoBEIREvaluator",
]
|
from __future__ import annotations
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .RerankingEvaluator import RerankingEvaluator
from .SentenceEvaluator import SentenceEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .SimilarityFunction import SimilarityFunction
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.streamlit.mutable_expander import (
ChildRecord,
ChildType,
MutableExpander,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ChildType": "langchain_community.callbacks.streamlit.mutable_expander",
"ChildRecord": "langchain_community.callbacks.streamlit.mutable_expander",
"MutableExpander": "langchain_community.callbacks.streamlit.mutable_expander",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ChildRecord",
"ChildType",
"MutableExpander",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.streamlit.mutable_expander import (
ChildRecord,
ChildType,
MutableExpander,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ChildType": "langchain_community.callbacks.streamlit.mutable_expander",
"ChildRecord": "langchain_community.callbacks.streamlit.mutable_expander",
"MutableExpander": "langchain_community.callbacks.streamlit.mutable_expander",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ChildType",
"ChildRecord",
"MutableExpander",
]
|
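The importer above is an application of PEP 562 module-level __getattr__. A generic, self-contained sketch of the same deprecation-shim idea follows; every name in it is illustrative, and langchain's create_importer layers deprecation warnings and optional-import handling on top of this basic mechanism.

import importlib
import warnings
from typing import Any

# Hypothetical mapping from old attribute names to their new module homes.
_DEPRECATED_LOOKUP = {"MutableExpander": "new_home.mutable_expander"}

def __getattr__(name: str) -> Any:
    if name in _DEPRECATED_LOOKUP:
        new_module = _DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {new_module} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(new_module), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")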
"""Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain_community.utilities.cassandra import SetupMode
if TYPE_CHECKING:
from cassandra.cluster import Session
from cassio.table.table_types import RowType
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_TTL_SECONDS = None
def _rows_to_messages(rows: Iterable[RowType]) -> List[BaseMessage]:
message_blobs = [row["body_blob"] for row in rows][::-1]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
class CassandraChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that is backed by Cassandra."""
def __init__(
self,
session_id: str,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = DEFAULT_TABLE_NAME,
ttl_seconds: Optional[int] = DEFAULT_TTL_SECONDS,
*,
setup_mode: SetupMode = SetupMode.SYNC,
) -> None:
"""
Initialize a new instance of CassandraChatMessageHistory.
Args:
session_id: arbitrary key that is used to store the messages
of a single chat session.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
            keyspace: Cassandra keyspace. If not provided, it is resolved from cassio.
table_name: name of the table to use.
ttl_seconds: time-to-live (seconds) for automatic expiration
of stored entries. None (default) for no expiration.
setup_mode: mode used to create the Cassandra table (SYNC, ASYNC or OFF).
"""
try:
from cassio.table import ClusteredCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session_id = session_id
self.ttl_seconds = ttl_seconds
kwargs: Dict[str, Any] = {}
if setup_mode == SetupMode.ASYNC:
kwargs["async_setup"] = True
self.table = ClusteredCassandraTable(
session=session,
keyspace=keyspace,
table=table_name,
ttl_seconds=ttl_seconds,
primary_key_type=["TEXT", "TIMEUUID"],
ordering_in_partition="DESC",
skip_provisioning=setup_mode == SetupMode.OFF,
**kwargs,
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore[override]
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = self.table.get_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
async def aget_messages(self) -> List[BaseMessage]:
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = await self.table.aget_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
def add_message(self, message: BaseMessage) -> None:
"""Write a message to the table
Args:
message: A message to write.
"""
this_row_id = uuid.uuid4()
self.table.put(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
for message in messages:
this_row_id = uuid.uuid4()
await self.table.aput(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
def clear(self) -> None:
"""Clear session memory from DB"""
self.table.delete_partition(self.session_id)
async def aclear(self) -> None:
"""Clear session memory from DB"""
await self.table.adelete_partition(self.session_id)
|
"""Cassandra-based chat message history, based on cassIO."""
from __future__ import annotations
import json
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from langchain_community.utilities.cassandra import SetupMode
if TYPE_CHECKING:
from cassandra.cluster import Session
from cassio.table.table_types import RowType
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
DEFAULT_TABLE_NAME = "message_store"
DEFAULT_TTL_SECONDS = None
def _rows_to_messages(rows: Iterable[RowType]) -> List[BaseMessage]:
message_blobs = [row["body_blob"] for row in rows][::-1]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
class CassandraChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that is backed by Cassandra."""
def __init__(
self,
session_id: str,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = DEFAULT_TABLE_NAME,
ttl_seconds: Optional[int] = DEFAULT_TTL_SECONDS,
*,
setup_mode: SetupMode = SetupMode.SYNC,
) -> None:
"""
Initialize a new instance of CassandraChatMessageHistory.
Args:
session_id: arbitrary key that is used to store the messages
of a single chat session.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
            keyspace: Cassandra keyspace. If not provided, it is resolved from cassio.
table_name: name of the table to use.
ttl_seconds: time-to-live (seconds) for automatic expiration
of stored entries. None (default) for no expiration.
setup_mode: mode used to create the Cassandra table (SYNC, ASYNC or OFF).
"""
try:
from cassio.table import ClusteredCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
)
self.session_id = session_id
self.ttl_seconds = ttl_seconds
kwargs: Dict[str, Any] = {}
if setup_mode == SetupMode.ASYNC:
kwargs["async_setup"] = True
self.table = ClusteredCassandraTable(
session=session,
keyspace=keyspace,
table=table_name,
ttl_seconds=ttl_seconds,
primary_key_type=["TEXT", "TIMEUUID"],
ordering_in_partition="DESC",
skip_provisioning=setup_mode == SetupMode.OFF,
**kwargs,
)
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = self.table.get_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
async def aget_messages(self) -> List[BaseMessage]:
"""Retrieve all session messages from DB"""
# The latest are returned, in chronological order
rows = await self.table.aget_partition(
partition_id=self.session_id,
)
return _rows_to_messages(rows)
def add_message(self, message: BaseMessage) -> None:
"""Write a message to the table
Args:
message: A message to write.
"""
this_row_id = uuid.uuid4()
self.table.put(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
async def aadd_messages(self, messages: Sequence[BaseMessage]) -> None:
for message in messages:
this_row_id = uuid.uuid4()
await self.table.aput(
partition_id=self.session_id,
row_id=this_row_id,
body_blob=json.dumps(message_to_dict(message)),
ttl_seconds=self.ttl_seconds,
)
def clear(self) -> None:
"""Clear session memory from DB"""
self.table.delete_partition(self.session_id)
async def aclear(self) -> None:
"""Clear session memory from DB"""
await self.table.adelete_partition(self.session_id)
|
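A hedged usage sketch for the class above. It assumes a reachable Cassandra cluster and `pip install cassio`; the contact point and keyspace are placeholders, and cassio.init() is used so that session=None/keyspace=None resolve as the docstring describes.

import cassio
from cassandra.cluster import Cluster  # placeholder local cluster
from langchain_community.chat_message_histories import CassandraChatMessageHistory
from langchain_core.messages import AIMessage, HumanMessage

session = Cluster(["127.0.0.1"]).connect()
cassio.init(session=session, keyspace="demo_keyspace")  # register defaults

history = CassandraChatMessageHistory(session_id="user-42")
history.add_message(HumanMessage(content="Hello"))
history.add_message(AIMessage(content="Hi! How can I help?"))
print(history.messages)  # messages back in chronological order
history.clear()          # drops this session's partition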
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param port: The port of the Gateway, which the client should connect to.
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
            these proxy variables before starting, since gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.port = port
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
        Set up the WebSocket server.
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
                Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
@property
def should_exit(self) -> bool:
"""
Boolean flag that indicates whether the gateway server should exit or not
:return: boolean flag
"""
return self.server.should_exit
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from ....gateway import BaseGateway
from . import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
**kwargs
):
"""Initialize the gateway
:param port: The port of the Gateway, which the client should connect to.
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.port = port
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
async def setup_server(self):
"""
        Set up the WebSocket server.
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
                Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
@property
def should_exit(self) -> bool:
"""
Boolean flag that indicates whether the gateway server should exit or not
:return: boolean flag
"""
return self.server.should_exit
|
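The health-check suppression inside serve() above is plain standard-library logging; a standalone sketch of the same filter idea:

import logging

class HealthcheckFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        # Keep every record except access lines for the bare `GET / ` route;
        # the trailing space matters, otherwise `GET /foo` would be dropped too.
        return "GET / " not in record.getMessage()

logging.getLogger("uvicorn.access").addFilter(HealthcheckFilter())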
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .mot import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
from .trackers import * # noqa: F401,F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
|
from langchain_core.tracers.log_stream import (
LogEntry,
LogStreamCallbackHandler,
RunLog,
RunLogPatch,
RunState,
)
__all__ = ["LogEntry", "LogStreamCallbackHandler", "RunLog", "RunLogPatch", "RunState"]
|
from langchain_core.tracers.log_stream import (
LogEntry,
LogStreamCallbackHandler,
RunLog,
RunLogPatch,
RunState,
)
__all__ = ["LogEntry", "RunState", "RunLog", "RunLogPatch", "LogStreamCallbackHandler"]
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
    URL to a video file.
    Can be a remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
                f'Video URL must have one of the following extensions: '
                f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a named Tuple of VideoNdArray, AudioNdArray and
NdArray.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: AudioNdArray representing the audio content, VideoNdArray representing
the images of the video, NdArray of the key frame indices.
"""
buffer = self.load_bytes(**kwargs)
return buffer.load()
def load_bytes(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to VideoBytes. This will either load or download the file and save
it into an VideoBytes object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: VideoBytes object
"""
bytes_ = super().load_bytes(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
            remote_url = self.startswith('http')
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.typing.bytes.video_bytes import VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='VideoUrl')
VIDEO_FILE_FORMATS = ['mp4']
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
    URL to a video file.
    Can be a remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_video_extension = any(ext in url for ext in VIDEO_FILE_FORMATS)
if not has_video_extension:
raise ValueError(
                f'Video URL must have one of the following extensions: '
                f'{VIDEO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a named Tuple of VideoNdArray, AudioNdArray and
NdArray.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: AudioNdArray representing the audio content, VideoNdArray representing
the images of the video, NdArray of the key frame indices.
"""
from docarray.typing.bytes.video_bytes import VideoBytes
buffer = VideoBytes(self.load_bytes(**kwargs))
return buffer.load()
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
            remote_url = self.startswith('http')
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user(alt_user: bool = False) -> User:
if alt_user:
test_user_data = {
"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
"email": "testuser2@example.com",
"name": "Test User 2",
}
else:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser@example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(
test_user.id, test_graph.id, response.graph_exec_id, 10
)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
from prisma.models import User
from backend.blocks.basic import AgentInputBlock, PrintToConsoleBlock
from backend.blocks.text import FillTextTemplateBlock
from backend.data import graph
from backend.data.graph import create_graph
from backend.data.user import get_or_create_user
from backend.util.test import SpinTestServer, wait_execution
async def create_test_user(alt_user: bool = False) -> User:
if alt_user:
test_user_data = {
"sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
"email": "testuser2@example.com",
"name": "Test User 2",
}
else:
test_user_data = {
"sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
"email": "testuser@example.com",
"name": "Test User",
}
user = await get_or_create_user(test_user_data)
return user
def create_test_graph() -> graph.Graph:
"""
InputBlock
\
---- FillTextTemplateBlock ---- PrintToConsoleBlock
/
InputBlock
"""
nodes = [
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_1"},
),
graph.Node(
block_id=AgentInputBlock().id,
input_default={"name": "input_2"},
),
graph.Node(
block_id=FillTextTemplateBlock().id,
input_default={
"format": "{{a}}, {{b}}{{c}}",
"values_#_c": "!!!",
},
),
graph.Node(block_id=PrintToConsoleBlock().id),
]
links = [
graph.Link(
source_id=nodes[0].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_a",
),
graph.Link(
source_id=nodes[1].id,
sink_id=nodes[2].id,
source_name="result",
sink_name="values_#_b",
),
graph.Link(
source_id=nodes[2].id,
sink_id=nodes[3].id,
source_name="output",
sink_name="text",
),
]
return graph.Graph(
name="TestGraph",
description="Test graph",
nodes=nodes,
links=links,
)
async def sample_agent():
async with SpinTestServer() as server:
test_user = await create_test_user()
test_graph = await create_graph(create_test_graph(), test_user.id)
input_data = {"input_1": "Hello", "input_2": "World"}
response = await server.agent_server.test_execute_graph(
test_graph.id, input_data, test_user.id
)
print(response)
result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
print(result)
if __name__ == "__main__":
import asyncio
asyncio.run(sample_agent())
|
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask
from .core import (
Booster,
DataIter,
DeviceQuantileDMatrix,
DMatrix,
QuantileDMatrix,
_py_version,
build_info,
)
from .tracker import RabitTracker # noqa
from .training import cv, train
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"DeviceQuantileDMatrix",
"QuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# dask
"dask",
# collective
"collective",
]
|
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask, rabit
from .core import (
Booster,
DataIter,
DeviceQuantileDMatrix,
DMatrix,
QuantileDMatrix,
_py_version,
build_info,
)
from .tracker import RabitTracker # noqa
from .training import cv, train
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"DeviceQuantileDMatrix",
"QuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# dask
"dask",
# collective
"collective",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
            There are two cases:
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function performs the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optim_wrapper.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.message_hub.update_scalar('train/grad_norm',
float(grad_norm))
runner.optim_wrapper.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with ``loss`` as the root.
            There are two cases:
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function performs the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.message_hub.update_scalar('train/grad_norm',
float(grad_norm))
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
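A minimal sketch of what clip_grads above boils down to, outside of any runner, assuming the usual config shape grad_clip=dict(max_norm=..., norm_type=...):

import torch
from torch.nn.utils import clip_grad

model = torch.nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).sum()
loss.backward()

grad_clip = dict(max_norm=35, norm_type=2)
params = [p for p in model.parameters() if p.requires_grad and p.grad is not None]
if params:  # mirrors the len(params) > 0 guard in clip_grads
    total_norm = clip_grad.clip_grad_norm_(params, **grad_clip)
    print(float(total_norm))  # the value the hook logs as train/grad_norm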
"""
Quickly verify that a list of Python files can be loaded by the Python interpreter
without raising any errors. Run before more expensive test suites; useful in
Makefiles.
If loading a file fails, the script prints the problematic filename and the detailed
error traceback.
"""
import random
import string
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
module_name = "".join(
random.choice(string.ascii_letters) # noqa: S311
for _ in range(20)
)
SourceFileLoader(module_name, file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
import random
import string
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
module_name = "".join(
random.choice(string.ascii_letters) for _ in range(20)
)
SourceFileLoader(module_name, file).load_module()
except Exception:
has_failure = True
print(file) # noqa: T201
traceback.print_exc()
print() # noqa: T201
sys.exit(1 if has_failure else 0)
|
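SourceFileLoader.load_module(), used above, is deprecated; a sketch of the modern importlib.util equivalent of the loading step (the helper name is illustrative):

import importlib.util
import sys

def try_import(path: str, module_name: str) -> None:
    spec = importlib.util.spec_from_file_location(module_name, path)
    assert spec is not None and spec.loader is not None
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module  # mirror normal import machinery
    spec.loader.exec_module(module)    # raises if the file cannot be loaded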
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import (
_FillType,
_get_fill,
_setup_fill_arg,
_setup_size,
get_bounding_boxes,
has_any,
is_pure_tensor,
query_size,
)
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_pure_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = datapoints.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_pure_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_pure_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = datapoints.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
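The crop-then-pad arithmetic in _get_params above reduces to a handful of min/max operations; a plain-torch sketch of just that math with illustrative sizes:

import torch

height, width = 300, 500            # input image
crop_height, crop_width = 400, 400  # target fixed size

new_height = min(height, crop_height)  # 300 -> no vertical crop needed
new_width = min(width, crop_width)     # 400 -> horizontal crop
r = torch.rand(1)
top = int(max(height - crop_height, 0) * r)   # 0
left = int(max(width - crop_width, 0) * r)    # random offset in [0, 100)

pad_bottom = max(crop_height - new_height, 0)  # 100 -> pad up to full size
pad_right = max(crop_width - new_width, 0)     # 0
print(top, left, pad_bottom, pad_right)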
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
class TestHuggingFaceEndpoint(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatHuggingFace
@property
def chat_model_params(self) -> dict:
return {}
@pytest.fixture
def model(self) -> BaseChatModel:
llm = HuggingFaceEndpoint( # type: ignore[call-arg]
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
max_new_tokens=512,
do_sample=False,
repetition_penalty=1.03,
)
return self.chat_model_class(llm=llm) # type: ignore[call-arg]
@pytest.mark.xfail(reason=("Not implemented"))
def test_stream(self, model: BaseChatModel) -> None:
super().test_stream(model)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_astream(self, model: BaseChatModel) -> None:
await super().test_astream(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_usage_metadata(self, model: BaseChatModel) -> None:
super().test_usage_metadata(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_calling(self, model: BaseChatModel) -> None:
super().test_tool_calling(model)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_tool_calling_async(self, model: BaseChatModel) -> None:
await super().test_tool_calling_async(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
super().test_tool_calling_with_no_arguments(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
super().test_bind_runnables_as_tools(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output(self, model: BaseChatModel, schema_type: str) -> None:
super().test_structured_output(model, schema_type)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_structured_output_async(
self, model: BaseChatModel, schema_type: str
) -> None: # type: ignore[override]
        await super().test_structured_output_async(model, schema_type)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
super().test_structured_output_pydantic_2_v1(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output_optional_param(self, model: BaseChatModel) -> None:
super().test_structured_output_optional_param(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(
model, my_adder_tool=my_adder_tool
)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_few_shot_examples(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_structured_few_shot_examples(model, my_adder_tool=my_adder_tool)
|
"""Standard LangChain interface tests"""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
class TestHuggingFaceEndpoint(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatHuggingFace
@property
def chat_model_params(self) -> dict:
return {}
@pytest.fixture
def model(self) -> BaseChatModel:
llm = HuggingFaceEndpoint( # type: ignore[call-arg]
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
max_new_tokens=512,
do_sample=False,
repetition_penalty=1.03,
)
return self.chat_model_class(llm=llm) # type: ignore[call-arg]
@pytest.mark.xfail(reason=("Not implemented"))
def test_stream(self, model: BaseChatModel) -> None:
super().test_stream(model)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_astream(self, model: BaseChatModel) -> None:
await super().test_astream(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_usage_metadata(self, model: BaseChatModel) -> None:
super().test_usage_metadata(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_calling(self, model: BaseChatModel) -> None:
super().test_tool_calling(model)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_tool_calling_async(self, model: BaseChatModel) -> None:
await super().test_tool_calling_async(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
super().test_tool_calling_with_no_arguments(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
super().test_bind_runnables_as_tools(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output(self, model: BaseChatModel, schema_type: str) -> None:
super().test_structured_output(model, schema_type)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_structured_output_async(
self, model: BaseChatModel, schema_type: str
) -> None: # type: ignore[override]
        await super().test_structured_output_async(model, schema_type)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
super().test_structured_output_pydantic_2_v1(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output_optional_param(self, model: BaseChatModel) -> None:
super().test_structured_output_optional_param(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(
model, my_adder_tool=my_adder_tool
)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_few_shot_examples(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_structured_few_shot_examples(model, my_adder_tool=my_adder_tool)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""LLM Compiler agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import Settings
from llama_index.core.tools.types import BaseTool
from .step import CoAAgentWorker
class CoAAgentPack(BaseLlamaPack):
"""
Chain-of-abstraction Agent Pack.
Args:
tools (List[BaseTool]): List of tools to use.
llm (Optional[LLM]): LLM to use. Defaults to ``Settings.llm``.
"""
def __init__(
self,
tools: List[BaseTool],
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
agent_worker_kwargs: Optional[Dict[str, Any]] = None,
agent_runner_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Init params."""
self.llm = llm or Settings.llm
self.callback_manager = callback_manager or self.llm.callback_manager
self.agent_worker = CoAAgentWorker.from_tools(
tools=tools,
llm=self.llm,
verbose=True,
callback_manager=self.callback_manager,
**(agent_worker_kwargs or {}),
)
self.agent = AgentRunner(
self.agent_worker,
callback_manager=self.callback_manager,
**(agent_runner_kwargs or {}),
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"callback_manager": self.callback_manager,
"agent_worker": self.agent_worker,
"agent": self.agent,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
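# A minimal usage sketch for the pack above. It is illustrative only: the
# `multiply` helper and the FunctionTool wrapper are assumptions chosen to
# show the expected call pattern, and an LLM must already be configured on
# llama_index's Settings (or passed explicitly) for `run` to work.
#
#     from llama_index.core.tools import FunctionTool
#
#     def multiply(a: int, b: int) -> int:
#         """Multiply two integers."""
#         return a * b
#
#     pack = CoAAgentPack(tools=[FunctionTool.from_defaults(fn=multiply)])
#     print(pack.run("What is 7 times 6?"))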
|
"""LLM Compiler agent pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.agent import AgentRunner
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import Settings
from llama_index.core.tools.types import BaseTool
from .step import CoAAgentWorker
class CoAAgentPack(BaseLlamaPack):
"""
Chain-of-abstraction Agent Pack.
Args:
tools (List[BaseTool]): List of tools to use.
llm (Optional[LLM]): LLM to use. Defaults to ``Settings.llm``.
"""
def __init__(
self,
tools: List[BaseTool],
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
agent_worker_kwargs: Optional[Dict[str, Any]] = None,
agent_runner_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
"""Init params."""
self.llm = llm or Settings.llm
self.callback_manager = callback_manager or self.llm.callback_manager
self.agent_worker = CoAAgentWorker.from_tools(
tools=tools,
llm=self.llm,
verbose=True,
callback_manager=self.callback_manager,
**(agent_worker_kwargs or {})
)
self.agent = AgentRunner(
self.agent_worker,
callback_manager=self.callback_manager,
**(agent_runner_kwargs or {})
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"callback_manager": self.callback_manager,
"agent_worker": self.agent_worker,
"agent": self.agent,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.agent.chat(*args, **kwargs)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTorchTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoTorchTensor, doc_2.url.load())
doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDocument):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTorchTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoTorchTensor, doc_2.url.load())
doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
except ValueError:
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
metrics = {name + "_" + key: float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
except ValueError:
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
from typing import Dict, Tuple
import torch
def get_versions() -> Dict[str, Tuple[int, int, int]]:
"""Get the versions of the FFmpeg libraries.
Returns:
dict: mapping from library names to version tuples,
e.g. ``"libavutil": (56, 22, 100)``.
"""
return torch.ops.torchaudio.ffmpeg_get_versions()
def get_log_level() -> int:
"""Get the log level of FFmpeg.
See :py:func:`set_log_level` for details.
"""
return torch.ops.torchaudio.ffmpeg_get_log_level()
def set_log_level(level: int):
"""Set the log level of FFmpeg (libavformat etc)
Arguments:
level (int): Log level. The larger, the more verbose.
The following are common values, listed with the corresponding ``ffmpeg``
``-loglevel`` option value and description.
* ``-8`` (``quiet``):
Print no output.
* ``0`` (``panic``):
Something went really wrong and we will crash now.
* ``8`` (``fatal``):
Something went wrong and recovery is not possible.
For example, no header was found for a format which depends
on headers or an illegal combination of parameters is used.
* ``16`` (``error``):
Something went wrong and cannot losslessly be recovered.
However, not all future data is affected.
* ``24`` (``warning``):
Something somehow does not look correct.
This may or may not lead to problems.
* ``32`` (``info``):
Standard information.
* ``40`` (``verbose``):
Detailed information.
* ``48`` (``debug``):
Stuff which is only useful for libav* developers.
* ``56`` (``trace``):
Extremely verbose debugging, useful for libav* development.
"""
torch.ops.torchaudio.ffmpeg_set_log_level(level)
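# Illustrative usage of the helpers above (assumes torchaudio was built with
# FFmpeg integration, otherwise the underlying ops are unavailable):
#
#     set_log_level(8)             # only fatal errors from libav*
#     assert get_log_level() == 8
#     print(get_versions())        # e.g. {"libavutil": (56, 22, 100), ...}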
|
import torch
def get_log_level() -> int:
"""Get the log level of FFmpeg.
See :py:func:`set_log_level` for details.
"""
return torch.ops.torchaudio.ffmpeg_get_log_level()
def set_log_level(level: int):
"""Set the log level of FFmpeg (libavformat etc)
Arguments:
level (int): Log level. The larger, the more verbose.
The following are common values, listed with the corresponding ``ffmpeg``
``-loglevel`` option value and description.
* ``-8`` (``quiet``):
Print no output.
* ``0`` (``panic``):
Something went really wrong and we will crash now.
* ``8`` (``fatal``):
Something went wrong and recovery is not possible.
For example, no header was found for a format which depends
on headers or an illegal combination of parameters is used.
* ``16`` (``error``):
Something went wrong and cannot losslessly be recovered.
However, not all future data is affected.
* ``24`` (``warning``):
Something somehow does not look correct.
This may or may not lead to problems.
* ``32`` (``info``):
Standard information.
* ``40`` (``verbose``):
Detailed information.
* ``48`` (``debug``):
Stuff which is only useful for libav* developers.
* ``56`` (``trace``):
Extremely verbose debugging, useful for libav* development.
"""
torch.ops.torchaudio.ffmpeg_set_log_level(level)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
import torch_npu.npu.utils as npu_utils
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = os.getenv('NPUJITCompile', 'false').lower() in ('true', '1')
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
try:
import torch_mlu # noqa: F401
IS_MLU_AVAILABLE = hasattr(torch, 'mlu') and torch.mlu.is_available()
except Exception:
IS_MLU_AVAILABLE = False
try:
import torch_dipu # noqa: F401
IS_DIPU_AVAILABLE = True
except Exception:
IS_DIPU_AVAILABLE = False
try:
import torch_musa # noqa: F401
IS_MUSA_AVAILABLE = True
except Exception:
IS_MUSA_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return IS_MLU_AVAILABLE
def is_mps_available() -> bool:
"""Return True if mps devices exist.
It is specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def is_dipu_available() -> bool:
return IS_DIPU_AVAILABLE
def get_max_musa_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.musa.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.musa.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
# TODO:haowen.han@mthreads.com: This function is not supported by musa yet.
# torch.musa.reset_peak_memory_stats()
return int(mem_mb.item())
def is_musa_available() -> bool:
return IS_MUSA_AVAILABLE
def is_npu_support_full_precision() -> bool:
"""Returns True if npu devices support full precision training."""
version_of_support_full_precision = 220
return IS_NPU_AVAILABLE and npu_utils.get_soc_version(
) >= version_of_support_full_precision
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
elif is_dipu_available():
DEVICE = 'dipu'
elif is_musa_available():
DEVICE = 'musa'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | musa | cpu.
"""
return DEVICE
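# A small device-agnostic sketch using get_device(): allocate tensors on
# whatever backend was detected at import time. Purely illustrative; it
# assumes the detected backend supports ordinary tensor creation.
#
#     device = get_device()
#     x = torch.ones(2, 3, device=device)
#     print(f'running on {device}: {x.sum().item()}')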
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
import torch_npu.npu.utils as npu_utils
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = os.getenv('NPUJITCompile', 'false').lower() in ('true', '1')
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
try:
import torch_dipu # noqa: F401
IS_DIPU_AVAILABLE = True
except Exception:
IS_DIPU_AVAILABLE = False
try:
import torch_musa # noqa: F401
IS_MUSA_AVAILABLE = True
except Exception:
IS_MUSA_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
It is specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def is_dipu_available() -> bool:
return IS_DIPU_AVAILABLE
def get_max_musa_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.musa.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.musa.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
# TODO:haowen.han@mthreads.com: This function is not supported by musa yet.
# torch.musa.reset_peak_memory_stats()
return int(mem_mb.item())
def is_musa_available() -> bool:
return IS_MUSA_AVAILABLE
def is_npu_support_full_precision() -> bool:
"""Returns True if npu devices support full precision training."""
version_of_support_full_precision = 220
return IS_NPU_AVAILABLE and npu_utils.get_soc_version(
) >= version_of_support_full_precision
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
elif is_dipu_available():
DEVICE = 'dipu'
elif is_musa_available():
DEVICE = 'musa'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | musa | cpu.
"""
return DEVICE
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments`, which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset column names to Router routes, like "query" or "document". This is used to specify
which Router submodule to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of column names to routes.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'SparseStaticEmbedding\.*': 1e-3}` for the
SparseStaticEmbedding module. This is useful when you want to fine-tune specific parts of the model
with different learning rates.
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments`, which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset column names to Router routes, like "query" or "document". This is used to specify
which Router submodule to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of column names to routes.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
__version__ = '0.35.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
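# Illustrative behaviour of the module-level __getattr__ above: legacy v1
# names fail with a pointer to the last compatible release, while the v2
# classes exported in __all__ import normally.
#
#     >>> from docarray import BaseDoc      # works
#     >>> from docarray import Document     # doctest: +SKIP
#     ImportError: Cannot import name 'Document' from 'docarray'. ...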
|
__version__ = '0.35.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Mask(TVTensor):
""":class:`torch.Tensor` subclass for segmentation and detection masks with shape ``[..., H, W]``.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
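# A minimal usage sketch (assumes Pillow is installed): wrapping a binary
# segmentation map as a Mask lets torchvision v2 transforms dispatch on it.
#
#     img = PIL.Image.new("L", (4, 4), color=1)
#     mask = Mask(img)                                   # uint8 tensor of shape (1, 4, 4)
#     mask2 = Mask(torch.zeros(4, 4), dtype=torch.bool)  # tensor-like input
#     assert isinstance(mask, torch.Tensor)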
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Mask(TVTensor):
""":class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
|
import copy
import clip
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from ...clip_text import CLIPTextEncoder
@pytest.fixture(scope="module")
def encoder() -> CLIPTextEncoder:
return CLIPTextEncoder()
def test_no_documents(encoder: CLIPTextEncoder):
docs = DocumentArray()
encoder.encode(docs=docs, parameters={})
assert len(docs) == 0
def test_none_docs(encoder: CLIPTextEncoder):
encoder.encode(docs=None, parameters={})
def test_docs_no_texts(encoder: CLIPTextEncoder):
docs = DocumentArray([Document()])
encoder.encode(docs=docs, parameters={})
assert len(docs) == 1
assert docs[0].embedding is None
def test_compute_tokens(encoder: CLIPTextEncoder):
tokens = encoder._generate_input_tokens(
["hello this is a test", "and another test"]
)
assert tokens["input_ids"].shape == (2, 7)
assert tokens["attention_mask"].shape == (2, 7)
def test_encoding_cpu():
encoder = CLIPTextEncoder(device="cpu")
input_data = DocumentArray([Document(text="hello world")])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (512,)
@pytest.mark.parametrize("batch_size", [1, 2, 4, 8])
def test_batch_size(encoder: CLIPTextEncoder, batch_size: int):
text = "Jina is Lit"
docs = DocumentArray([Document(text=text) for _ in range(32)])
encoder.encode(docs, parameters={"batch_size": batch_size})
for doc in docs:
assert doc.embedding.shape == (512,)
def test_encodes_semantic_meaning():
"""
Check if the distance between embeddings of similar sentences are smaller
than dissimilar pair of sentences.
"""
docs = DocumentArray(
[
Document(id="A", text="a furry animal that with a long tail"),
Document(id="B", text="a domesticated mammal with four legs"),
Document(id="C", text="a type of aircraft that uses rotating wings"),
Document(id="D", text="flying vehicle that has fixed wings and engines"),
]
)
clip_text_encoder = CLIPTextEncoder()
clip_text_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ["B", "A", "D", "C"]
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
def test_openai_embed_match():
docs = []
sentences = [
"Jina AI is lit",
"Jina AI is great",
"Jina AI is a cloud-native neural search company",
"Jina AI is a github repo",
"Jina AI is an open source neural search project",
]
for sentence in sentences:
docs.append(Document(text=sentence))
clip_text_encoder = CLIPTextEncoder("openai/clip-vit-base-patch32")
clip_text_encoder.encode(DocumentArray(docs), {})
txt_to_ndarray = {}
for d in docs:
txt_to_ndarray[d.text] = d.embedding
# assert same results with OpenAI's implementation
model, preprocess = clip.load("ViT-B/32", device="cpu")
assert len(txt_to_ndarray) == 5
for text, actual_embedding in txt_to_ndarray.items():
with torch.no_grad():
tokens = clip.tokenize(text)
expected_embedding = model.encode_text(tokens).detach().numpy().flatten()
np.testing.assert_almost_equal(actual_embedding, expected_embedding, 5)
def test_traversal_path():
text = "blah"
docs = DocumentArray([Document(id="root1", text=text)])
docs[0].chunks = [
Document(id="chunk11", text=text),
Document(id="chunk12", text=text),
Document(id="chunk13", text=text),
]
docs[0].chunks[0].chunks = [
Document(id="chunk111", text=text),
Document(id="chunk112", text=text),
]
encoder = CLIPTextEncoder(default_traversal_paths=["c"], model_name="ViT-B/32")
original_docs = copy.deepcopy(docs)
encoder.encode(docs=docs, parameters={}, return_results=True)
for path, count in [["r", 0], ["c", 3], ["cc", 0]]:
assert len(docs.traverse_flat([path]).get_attributes("embedding")) == count
encoder.encode(
docs=original_docs, parameters={"traversal_paths": ["cc"]}, return_results=True
)
for path, count in [["r", 0], ["c", 0], ["cc", 2]]:
assert (
len(original_docs.traverse_flat([path]).get_attributes("embedding"))
== count
)
|
import clip
import copy
import numpy as np
import torch
from jina import Document, DocumentArray, Executor
from jinahub.encoder.clip_text import CLIPTextEncoder
def test_clip_batch():
test_docs = DocumentArray((Document(text='random text') for _ in range(30)))
clip_text_encoder = CLIPTextEncoder()
parameters = {'batch_size': 10}
clip_text_encoder.encode(test_docs, parameters)
assert 30 == len(test_docs.get_attributes('embedding'))
def test_clip_data():
docs = []
words = ['apple', 'banana1', 'banana2', 'studio', 'satellite', 'airplane']
for word in words:
docs.append(Document(text=word))
sentences = [
'Jina AI is lit',
'Jina AI is great',
'Jina AI is a cloud-native neural search company',
'Jina AI is a github repo',
'Jina AI is an open source neural search project',
]
for sentence in sentences:
docs.append(Document(text=sentence))
clip_text_encoder = CLIPTextEncoder()
clip_text_encoder.encode(DocumentArray(docs), {})
txt_to_ndarray = {}
for d in docs:
txt_to_ndarray[d.text] = d.embedding
def dist(a, b):
nonlocal txt_to_ndarray
a_embedding = txt_to_ndarray[a]
b_embedding = txt_to_ndarray[b]
return np.linalg.norm(a_embedding - b_embedding)
# assert semantic meaning is captured in the encoding
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
small_distance = dist('Jina AI is lit', 'Jina AI is great')
assert small_distance < dist(
'Jina AI is a cloud-native neural search company', 'Jina AI is a github repo'
)
assert small_distance < dist(
'Jina AI is a cloud-native neural search company',
'Jina AI is an open source neural search project',
)
# assert same results like calculating it manually
model, preprocess = clip.load('ViT-B/32', device='cpu')
assert len(txt_to_ndarray) == 11
for text, actual_embedding in txt_to_ndarray.items():
with torch.no_grad():
tokens = clip.tokenize(text)
expected_embedding = model.encode_text(tokens).detach().numpy().flatten()
np.testing.assert_almost_equal(actual_embedding, expected_embedding, 5)
def test_traversal_path():
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text)
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
encoder = CLIPTextEncoder(default_traversal_paths=['c'], model_name='ViT-B/32')
original_docs = copy.deepcopy(docs)
encoder.encode(docs=docs, parameters={}, return_results=True)
for path, count in [['r', 0], ['c', 3], ['cc', 0]]:
assert len(docs.traverse_flat([path]).get_attributes('embedding')) == count
encoder.encode(docs=original_docs, parameters={'traversal_paths': ['cc']}, return_results=True)
for path, count in [['r', 0], ['c', 0], ['cc', 2]]:
assert len(original_docs.traverse_flat([path]).get_attributes('embedding')) == count
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_BinaryClassificationEvaluator_find_best_f1_and_threshold() -> None:
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
best_f1,
best_precision,
best_recall,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_BinaryClassificationEvaluator_find_best_accuracy_and_threshold() -> None:
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
max_acc,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model)
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
def test_ParaphraseMiningEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the ParaphraseMiningEvaluator can be loaded"""
model = paraphrase_distilroberta_base_v1_model
sentences = {
0: "Hello World",
1: "Hello World!",
2: "The cat is on the table",
3: "On the table the cat is",
}
data_eval = evaluation.ParaphraseMiningEvaluator(sentences, [(0, 1), (2, 3)])
metrics = data_eval(model)
assert metrics[data_eval.primary_metric] > 0.99
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_BinaryClassificationEvaluator_find_best_f1_and_threshold() -> None:
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
best_f1,
best_precision,
best_recall,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_BinaryClassificationEvaluator_find_best_accuracy_and_threshold() -> None:
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
max_acc,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
acc = evaluator(model)
assert acc > 0.2
def test_ParaphraseMiningEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the ParaphraseMiningEvaluator can be loaded"""
model = paraphrase_distilroberta_base_v1_model
sentences = {
0: "Hello World",
1: "Hello World!",
2: "The cat is on the table",
3: "On the table the cat is",
}
data_eval = evaluation.ParaphraseMiningEvaluator(sentences, [(0, 1), (2, 3)])
score = data_eval(model)
assert score > 0.99
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import sys
import pkg_resources
import pytest
from mmengine.utils import get_installed_path, is_installed
def test_is_installed():
# TODO: Windows CI may failed in unknown reason. Skip check the value
is_installed('mmengine')
# If there is `__init__.py` in the directory which is added into
# `sys.path`, the directory will be recognized as a package.
PYTHONPATH = osp.abspath(
osp.join(osp.dirname(__file__), '..', '..', 'mmengine'))
sys.path.append(PYTHONPATH)
assert is_installed('optim')
sys.path.pop()
def test_get_install_path():
# TODO: Windows CI may failed in unknown reason. Skip check the value
get_installed_path('mmengine')
# get path for package "installed" by setting PYTHONPATH
PYTHONPATH = osp.abspath(
osp.join(osp.dirname(__file__), '..', '..', 'mmengine'))
sys.path.append(PYTHONPATH)
assert get_installed_path('optim') == osp.join(PYTHONPATH, 'optim')
sys.path.pop()
with pytest.raises(pkg_resources.DistributionNotFound):
get_installed_path('unknown')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import sys
from pathlib import Path
from mmengine.utils import get_installed_path, is_installed
def test_is_installed():
# TODO: Windows CI may failed in unknown reason. Skip check the value
is_installed('mmengine')
# package set by PYTHONPATH
assert not is_installed('py_config')
sys.path.append(osp.abspath(osp.join(osp.dirname(__file__), '..')))
assert is_installed('test_utils')
sys.path.pop()
def test_get_install_path(tmp_path: Path):
# TODO: Windows CI may failed in unknown reason. Skip check the value
get_installed_path('mmengine')
# get path for package "installed" by setting PYTHONPATH
PYTHONPATH = osp.abspath(osp.join(
osp.dirname(__file__),
'..',
))
sys.path.append(PYTHONPATH)
res_path = get_installed_path('test_utils')
assert osp.join(PYTHONPATH, 'test_utils') == res_path
# return the first path for namespace package
# See more information about namespace package in:
# https://packaging.python.org/en/latest/guides/packaging-namespace-packages/ # noqa:E501
(tmp_path / 'test_utils').mkdir()
sys.path.insert(-1, str(tmp_path))
res_path = get_installed_path('test_utils')
assert osp.abspath(osp.join(tmp_path, 'test_utils')) == res_path
sys.path.pop()
sys.path.pop()
|
"""RunInfo class."""
from __future__ import annotations
from uuid import UUID
from pydantic import BaseModel
class RunInfo(BaseModel):
"""Class that contains metadata for a single execution of a Chain or model.
Defined for backwards compatibility with older versions of langchain_core.
This model will likely be deprecated in the future.
Users can acquire the run_id information from callbacks or via run_id
information present in the astream_event API (depending on the use case).
"""
run_id: UUID
"""A unique identifier for the model or chain run."""
|
from __future__ import annotations
from uuid import UUID
from pydantic import BaseModel
class RunInfo(BaseModel):
"""Class that contains metadata for a single execution of a Chain or model.
Defined for backwards compatibility with older versions of langchain_core.
This model will likely be deprecated in the future.
Users can acquire the run_id information from callbacks or via run_id
information present in the astream_event API (depending on the use case).
"""
run_id: UUID
"""A unique identifier for the model or chain run."""
|
"""
Test of utility functions for working with Search Index commands.
Note that search index commands are only supported on Atlas Clusters >=M10.
"""
import os
from typing import Generator, List, Optional
import pytest
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch, index
from pymongo import MongoClient
from pymongo.collection import Collection
MONGODB_URI = os.environ.get("MONGODB_URI")
DB_NAME = os.environ.get("MONGODB_DATABASE", "llama_index_test_db")
COLLECTION_NAME = "test_index_commands"
VECTOR_INDEX_NAME = "vector_index"
FULLTEXT_INDEX_NAME = "fulltext_index"
FILTER_FIELD_NAME = "country"
FILTER_FIELD_TYPE = "string"
TIMEOUT = 120
DIMENSIONS = 10
@pytest.fixture()
def collection(vector_store) -> Generator:
"""Depending on uri, this could point to any type of cluster."""
clxn = vector_store.collection
clxn.insert_many([{"year": 2024}, {"country": "Canada"}])
yield clxn
clxn.drop()
@pytest.fixture()
def vector_store(atlas_client: MongoClient) -> MongoDBAtlasVectorSearch:
return MongoDBAtlasVectorSearch(
mongodb_client=atlas_client,
db_name=DB_NAME,
collection_name=COLLECTION_NAME,
vector_index_name=VECTOR_INDEX_NAME,
)
@pytest.mark.skipif(
os.environ.get("MONGODB_URI") is None, reason="Requires MONGODB_URI in os.environ"
)
def test_search_index_commands_standalone(collection: Collection) -> None:
"""Tests create, update, and drop index utility functions."""
index_name = VECTOR_INDEX_NAME
dimensions = DIMENSIONS
path = "embedding"
similarity = "cosine"
filters: Optional[List[str]] = None
wait_until_complete = TIMEOUT
for index_info in collection.list_search_indexes():
index.drop_vector_search_index(
collection, index_info["name"], wait_until_complete=wait_until_complete
)
assert len(list(collection.list_search_indexes())) == 0
# Create a Vector Search Index on index_name
index.create_vector_search_index(
collection=collection,
index_name=index_name,
dimensions=dimensions,
path=path,
similarity=similarity,
filters=filters,
wait_until_complete=wait_until_complete,
)
indexes = list(collection.list_search_indexes())
assert len(indexes) == 1
assert indexes[0]["name"] == index_name
# Update that index by adding a filter
# This will additionally index the "bar" and "foo" fields
# The Update method is not yet supported in Atlas Local.
if "mongodb+srv" in os.environ.get("MONGODB_URI"):
new_similarity = "euclidean"
index.update_vector_search_index(
collection=collection,
index_name=index_name,
dimensions=DIMENSIONS,
path="embedding",
similarity=new_similarity,
filters=[FILTER_FIELD_NAME],
wait_until_complete=wait_until_complete,
)
indexes = list(collection.list_search_indexes())
assert len(indexes) == 1
assert indexes[0]["name"] == index_name
fields = indexes[0]["latestDefinition"]["fields"]
assert len(fields) == 2
assert {"type": "filter", "path": FILTER_FIELD_NAME} in fields
assert {
"numDimensions": DIMENSIONS,
"path": "embedding",
"similarity": f"{new_similarity}",
"type": "vector",
} in fields
# Now add a full-text search index for the filter field
index.create_fulltext_search_index(
collection=collection,
index_name=FULLTEXT_INDEX_NAME,
field=FILTER_FIELD_NAME,
field_type=FILTER_FIELD_TYPE,
wait_until_complete=TIMEOUT,
)
indexes = list(collection.list_search_indexes())
assert len(indexes) == 2
assert any(idx["name"] == FULLTEXT_INDEX_NAME for idx in indexes)
idx_fulltext = (
indexes[0] if indexes[0]["name"] == FULLTEXT_INDEX_NAME else indexes[1]
)
assert idx_fulltext["type"] == "search"
fields = idx_fulltext["latestDefinition"]["mappings"]["fields"]
assert fields[FILTER_FIELD_NAME]["type"] == FILTER_FIELD_TYPE
# Finally, drop both indexes
for name in [FULLTEXT_INDEX_NAME, VECTOR_INDEX_NAME]:
index.drop_vector_search_index(
collection, name, wait_until_complete=wait_until_complete
)
indexes = list(collection.list_search_indexes())
for idx in indexes:
assert idx["status"] == "DELETING"
|
"""Test of utility functions for working with Search Index commands.
Note that search index commands are only supported on Atlas Clusters >=M10.
"""
import os
from typing import Generator, List, Optional
import pytest
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch, index
from pymongo import MongoClient
from pymongo.collection import Collection
MONGODB_URI = os.environ.get("MONGODB_URI")
DB_NAME = os.environ.get("MONGODB_DATABASE", "llama_index_test_db")
COLLECTION_NAME = "test_index_commands"
VECTOR_INDEX_NAME = "vector_index"
FULLTEXT_INDEX_NAME = "fulltext_index"
FILTER_FIELD_NAME = "country"
FILTER_FIELD_TYPE = "string"
TIMEOUT = 120
DIMENSIONS = 10
@pytest.fixture()
def collection(vector_store) -> Generator:
"""Depending on uri, this could point to any type of cluster."""
clxn = vector_store.collection
clxn.insert_many([{"year": 2024}, {"country": "Canada"}])
yield clxn
clxn.drop()
@pytest.fixture()
def vector_store(atlas_client: MongoClient) -> MongoDBAtlasVectorSearch:
return MongoDBAtlasVectorSearch(
mongodb_client=atlas_client,
db_name=DB_NAME,
collection_name=COLLECTION_NAME,
vector_index_name=VECTOR_INDEX_NAME,
)
@pytest.mark.skipif(
os.environ.get("MONGODB_URI") is None, reason="Requires MONGODB_URI in os.environ"
)
def test_search_index_commands_standalone(collection: Collection) -> None:
"""Tests create, update, and drop index utility functions."""
index_name = VECTOR_INDEX_NAME
dimensions = DIMENSIONS
path = "embedding"
similarity = "cosine"
filters: Optional[List[str]] = None
wait_until_complete = TIMEOUT
for index_info in collection.list_search_indexes():
index.drop_vector_search_index(
collection, index_info["name"], wait_until_complete=wait_until_complete
)
assert len(list(collection.list_search_indexes())) == 0
# Create a Vector Search Index on index_name
index.create_vector_search_index(
collection=collection,
index_name=index_name,
dimensions=dimensions,
path=path,
similarity=similarity,
filters=filters,
wait_until_complete=wait_until_complete,
)
indexes = list(collection.list_search_indexes())
assert len(indexes) == 1
assert indexes[0]["name"] == index_name
# Update that index by adding a filter
    # This will additionally index the FILTER_FIELD_NAME ("country") field
# The Update method is not yet supported in Atlas Local.
if "mongodb+srv" in os.environ.get("MONGODB_URI"):
new_similarity = "euclidean"
index.update_vector_search_index(
collection=collection,
index_name=index_name,
dimensions=DIMENSIONS,
path="embedding",
similarity=new_similarity,
filters=[FILTER_FIELD_NAME],
wait_until_complete=wait_until_complete,
)
indexes = list(collection.list_search_indexes())
assert len(indexes) == 1
assert indexes[0]["name"] == index_name
fields = indexes[0]["latestDefinition"]["fields"]
assert len(fields) == 2
assert {"type": "filter", "path": FILTER_FIELD_NAME} in fields
assert {
"numDimensions": DIMENSIONS,
"path": "embedding",
"similarity": f"{new_similarity}",
"type": "vector",
} in fields
# Now add a full-text search index for the filter field
index.create_fulltext_search_index(
collection=collection,
index_name=FULLTEXT_INDEX_NAME,
field=FILTER_FIELD_NAME,
field_type=FILTER_FIELD_TYPE,
wait_until_complete=TIMEOUT,
)
indexes = list(collection.list_search_indexes())
assert len(indexes) == 2
assert any(idx["name"] == FULLTEXT_INDEX_NAME for idx in indexes)
idx_fulltext = (
indexes[0] if indexes[0]["name"] == FULLTEXT_INDEX_NAME else indexes[1]
)
assert idx_fulltext["type"] == "search"
fields = idx_fulltext["latestDefinition"]["mappings"]["fields"]
assert fields[FILTER_FIELD_NAME]["type"] == FILTER_FIELD_TYPE
    # Finally, drop both indexes
for name in [FULLTEXT_INDEX_NAME, VECTOR_INDEX_NAME]:
index.drop_vector_search_index(
collection, name, wait_until_complete=wait_until_complete
)
indexes = list(collection.list_search_indexes())
for idx in indexes:
assert idx["status"] == "DELETING"
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import NdArray, TorchTensor
class NpDoc(BaseDoc):
embedding: NdArray[3, 4]
embedding_no_shape: NdArray
class TorchDoc(BaseDoc):
embedding: TorchTensor[3, 4]
embedding_no_shape: TorchTensor
def test_np_schema():
schema = NpDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
def test_torch_schema():
schema = TorchDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
@pytest.mark.tensorflow
def test_tensorflow_schema():
from docarray.typing import TensorFlowTensor
class TensorflowDoc(BaseDoc):
embedding: TensorFlowTensor[3, 4]
embedding_no_shape: TensorFlowTensor
schema = TensorflowDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
|
import numpy as np
import pytest
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import NdArray, TorchTensor
class NpDoc(BaseDoc):
embedding: NdArray[3, 4]
embedding_no_shape: NdArray
class TorchDoc(BaseDoc):
embedding: TorchTensor[3, 4]
embedding_no_shape: TorchTensor
def test_np_schema():
schema = NpDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
def test_torch_schema():
schema = TorchDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
@pytest.mark.tensorflow
def test_tensorflow_schema():
from docarray.typing import TensorFlowTensor
class TensorflowDoc(BaseDoc):
embedding: TensorFlowTensor[3, 4]
embedding_no_shape: TensorFlowTensor
schema = TensorflowDoc.schema()
assert schema['properties']['embedding']['tensor/array shape'] == '[3, 4]'
assert schema['properties']['embedding']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
assert (
schema['properties']['embedding']['example']
== orjson_dumps(np.zeros([3, 4])).decode()
)
assert (
schema['properties']['embedding_no_shape']['tensor/array shape']
== 'not specified'
)
assert schema['properties']['embedding_no_shape']['type'] == 'array'
assert schema['properties']['embedding']['items']['type'] == 'number'
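# Hedged usage sketch (not collected by pytest): the shaped annotation drives
# both input validation and the 'tensor/array shape' schema entry that the
# tests above inspect.
def _example_np_schema_usage():
    doc = NpDoc(embedding=np.zeros([3, 4]), embedding_no_shape=np.zeros(5))
    assert NpDoc.schema()['properties']['embedding']['tensor/array shape'] == '[3, 4]'
    assert doc.embedding.shape == (3, 4)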
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import BaseBoxes, HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def encode(self, bboxes: Tensor, gt_bboxes: Union[Tensor,
BaseBoxes]) -> Tensor:
"""torch.Tensor: return the given ``bboxes``"""
gt_bboxes = get_box_tensor(gt_bboxes)
return gt_bboxes
def decode(self, bboxes: Tensor, pred_bboxes: Union[Tensor,
BaseBoxes]) -> Tensor:
"""torch.Tensor: return the given ``pred_bboxes``"""
if self.use_box_type:
pred_bboxes = HorizontalBoxes(pred_bboxes)
return pred_bboxes
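# Hedged sketch of the pass-through contract (``use_box_type`` is inherited
# from BaseBBoxCoder; False is assumed here so decode returns the raw tensor):
#
#     import torch
#     coder = PseudoBBoxCoder(use_box_type=False)
#     gt = torch.rand(4, 4)
#     assert coder.encode(torch.rand(4, 4), gt) is gt  # returns gt unchanged
#     assert coder.decode(torch.rand(4, 4), gt) is gt  # returns preds unchanged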
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes, get_box_tensor
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def encode(self, bboxes, gt_bboxes):
"""torch.Tensor: return the given ``bboxes``"""
gt_bboxes = get_box_tensor(gt_bboxes)
return gt_bboxes
def decode(self, bboxes, pred_bboxes):
"""torch.Tensor: return the given ``pred_bboxes``"""
if self.use_box_type:
pred_bboxes = HorizontalBoxes(pred_bboxes)
return pred_bboxes
|
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_simple_use_abs_import_shall_fail():
with pytest.raises(ModuleNotFoundError):
from .dummyhub_abs import DummyHubExecutorAbs
DummyHubExecutorAbs()
with pytest.raises(RuntimeFailToStart):
with Flow().add(uses='DummyHubExecutorAbs'):
pass
def test_simple_use_relative_import():
from .dummyhub import DummyHubExecutor
DummyHubExecutor()
with Flow().add(uses='DummyHubExecutor'):
pass
def test_use_from_local_dir_exe_level():
with BaseExecutor.load_config('dummyhub/config.yml'):
pass
def test_use_from_local_dir_deployment_level():
a = set_deployment_parser().parse_args(['--uses', 'dummyhub/config.yml'])
with Deployment(a):
pass
def test_use_from_local_dir_flow_level():
with Flow().add(uses='dummyhub/config.yml'):
pass
@pytest.fixture
def local_hub_executor(tmpdir):
from jina.hubble import HubExecutor, helper, hubapi
pkg_path = Path(__file__).parent / 'dummyhub'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
hubapi.install_local(
Path(tmpdir) / 'dummy_test.zip', HubExecutor(uuid='hello', tag='v0')
)
def test_use_from_local_hub_deployment_level(
mocker, monkeypatch, local_hub_executor
):
from jina.hubble.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag=None,
secret=None,
image_required=True,
rebuild_image=True,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
a = set_deployment_parser().parse_args(['--uses', 'jinahub://hello'])
with Deployment(a):
pass
def test_use_from_local_hub_flow_level(
mocker, monkeypatch, local_hub_executor
):
from jina.hubble.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag=None,
secret=None,
image_required=True,
rebuild_image=True,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
with Flow().add(uses='jinahub://hello', install_requirements=True):
pass
|
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_simple_use_abs_import_shall_fail():
with pytest.raises(ModuleNotFoundError):
from .dummyhub_abs import DummyHubExecutorAbs
DummyHubExecutorAbs()
with pytest.raises(RuntimeFailToStart):
with Flow().add(uses='DummyHubExecutorAbs'):
pass
def test_simple_use_relative_import():
from .dummyhub import DummyHubExecutor
DummyHubExecutor()
with Flow().add(uses='DummyHubExecutor'):
pass
def test_use_from_local_dir_exe_level():
with BaseExecutor.load_config('dummyhub/config.yml'):
pass
def test_use_from_local_dir_deployment_level():
a = set_deployment_parser().parse_args(['--uses', 'dummyhub/config.yml'])
with Deployment(a):
pass
def test_use_from_local_dir_flow_level():
with Flow().add(uses='dummyhub/config.yml'):
pass
@pytest.fixture
def local_hub_executor(tmpdir, test_envs):
from jina.hubble import HubExecutor, helper, hubapi
hubapi._hub_root = Path(os.environ.get('JINA_HUB_ROOT'))
pkg_path = Path(__file__).parent / 'dummyhub'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
hubapi.install_local(
Path(tmpdir) / 'dummy_test.zip', HubExecutor(uuid='hello', tag='v0')
)
def test_use_from_local_hub_deployment_level(
test_envs, mocker, monkeypatch, local_hub_executor
):
from jina.hubble.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag=None,
secret=None,
image_required=True,
rebuild_image=True,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
a = set_deployment_parser().parse_args(['--uses', 'jinahub://hello'])
with Deployment(a):
pass
def test_use_from_local_hub_flow_level(
test_envs, mocker, monkeypatch, local_hub_executor
):
from jina.hubble.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag=None,
secret=None,
image_required=True,
rebuild_image=True,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
with Flow().add(uses='jinahub://hello', install_requirements=True):
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
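# Hedged usage note: in mmdet this detector is normally constructed from a
# config dict through the registry imported above, e.g.
#
#     model = DETECTORS.build(cfg.model)  # cfg.model['type'] == 'FasterRCNN'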
|
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydantic
is_pydantic_v2 = pydantic.__version__.startswith('2.')
if not is_pydantic_v2:
from pydantic.validators import bytes_validator
else:
from pydantic.v1.validators import bytes_validator
__all__ = ['is_pydantic_v2', 'bytes_validator']
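# Hedged usage sketch: whichever branch ran, the exported validator behaves
# like pydantic v1's, coercing str/bytearray input to bytes:
#
#     assert bytes_validator('abc') == b'abc'
#     assert bytes_validator(bytearray(b'ab')) == b'ab'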
|
import pydantic
is_pydantic_v2 = pydantic.__version__.startswith('2.')
if not is_pydantic_v2:
from pydantic.validators import bytes_validator
else:
from pydantic.v1.validators import bytes_validator
__all__ = ['is_pydantic_v2', 'bytes_validator']
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.ops.core import _saturate_cast
@keras_export("keras.layers.AutoContrast")
class AutoContrast(BaseImagePreprocessingLayer):
"""Performs the auto-contrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. An
example of this is if an image only has values `[0, 1]` out of the range
`[0, 255]`, auto contrast will change the `1` values to be `255`.
This layer is active at both training and inference time.
Args:
value_range: Range of values the incoming images will have.
Represented as a two number tuple written `(low, high)`.
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
Defaults to `(0, 255)`.
"""
_USE_BASE_FACTOR = False
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
value_range=(0, 255),
**kwargs,
):
super().__init__(**kwargs)
self._set_value_range(value_range)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def transform_images(self, images, transformation=None, training=True):
original_images = images
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.backend.cast(images, self.compute_dtype)
low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)
high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
results = self.backend.numpy.clip(images, 0.0, 255.0)
results = self._transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
results = self.backend.numpy.where(
self.backend.numpy.isnan(results), original_images, results
)
if results.dtype == images.dtype:
return results
if backend.is_int_dtype(images.dtype):
results = self.backend.numpy.round(results)
return _saturate_cast(results, images.dtype, self.backend)
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
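# Hedged usage sketch (assumes a configured Keras backend): a batch whose
# pixel values only span [64, 128] is stretched across the full value range.
#
#     import numpy as np
#     layer = AutoContrast(value_range=(0, 255))
#     low_contrast = np.random.uniform(64, 128, size=(2, 8, 8, 3)).astype("float32")
#     stretched = layer(low_contrast)  # per-image min -> 0, per-image max -> 255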
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.ops.core import _saturate_cast
@keras_export("keras.layers.AutoContrast")
class AutoContrast(BaseImagePreprocessingLayer):
"""Performs the auto-contrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. An
example of this is if an image only has values `[0, 1]` out of the range
`[0, 255]`, auto contrast will change the `1` values to be `255`.
This layer is active at both training and inference time.
Args:
value_range: Range of values the incoming images will have.
Represented as a two number tuple written `(low, high)`.
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
Defaults to `(0, 255)`.
"""
_USE_BASE_FACTOR = False
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
value_range=(0, 255),
**kwargs,
):
super().__init__(**kwargs)
self._set_value_range(value_range)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def transform_images(self, images, transformation=None, training=True):
original_images = images
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.backend.cast(images, self.compute_dtype)
low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)
high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
results = self.backend.numpy.clip(images, 0.0, 255.0)
results = self._transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
results = self.backend.numpy.where(
            self.backend.numpy.isnan(results), original_images, results
)
if results.dtype == images.dtype:
return results
if backend.is_int_dtype(images.dtype):
results = self.backend.numpy.round(results)
return _saturate_cast(results, images.dtype, self.backend)
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
|
from abc import abstractmethod
from typing import TYPE_CHECKING, List
from langchain_community.document_loaders.parsers.language.code_segmenter import (
CodeSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language, Parser
class TreeSitterSegmenter(CodeSegmenter):
"""Abstract class for `CodeSegmenter`s that use the tree-sitter library."""
def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
try:
import tree_sitter # noqa: F401
import tree_sitter_languages # noqa: F401
except ImportError:
raise ImportError(
"Could not import tree_sitter/tree_sitter_languages Python packages. "
"Please install them with "
"`pip install tree-sitter tree-sitter-languages`."
)
def is_valid(self) -> bool:
language = self.get_language()
error_query = language.query("(ERROR) @error")
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
return len(error_query.captures(tree.root_node)) == 0
def extract_functions_classes(self) -> List[str]:
language = self.get_language()
query = language.query(self.get_chunk_query())
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
captures = query.captures(tree.root_node)
processed_lines = set()
chunks = []
for node, name in captures:
start_line = node.start_point[0]
end_line = node.end_point[0]
lines = list(range(start_line, end_line + 1))
if any(line in processed_lines for line in lines):
continue
processed_lines.update(lines)
chunk_text = node.text.decode("UTF-8")
chunks.append(chunk_text)
return chunks
def simplify_code(self) -> str:
language = self.get_language()
query = language.query(self.get_chunk_query())
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
processed_lines = set()
simplified_lines = self.source_lines[:]
for node, name in query.captures(tree.root_node):
start_line = node.start_point[0]
end_line = node.end_point[0]
lines = list(range(start_line, end_line + 1))
if any(line in processed_lines for line in lines):
continue
simplified_lines[start_line] = self.make_line_comment(
f"Code for: {self.source_lines[start_line]}"
)
for line_num in range(start_line + 1, end_line + 1):
simplified_lines[line_num] = None # type: ignore[call-overload]
processed_lines.update(lines)
return "\n".join(line for line in simplified_lines if line is not None)
def get_parser(self) -> "Parser":
from tree_sitter import Parser
parser = Parser()
parser.set_language(self.get_language())
return parser
@abstractmethod
def get_language(self) -> "Language":
raise NotImplementedError() # pragma: no cover
@abstractmethod
def get_chunk_query(self) -> str:
raise NotImplementedError() # pragma: no cover
@abstractmethod
def make_line_comment(self, text: str) -> str:
raise NotImplementedError() # pragma: no cover
|
from abc import abstractmethod
from typing import TYPE_CHECKING, List
from langchain_community.document_loaders.parsers.language.code_segmenter import (
CodeSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language, Parser
class TreeSitterSegmenter(CodeSegmenter):
"""Abstract class for `CodeSegmenter`s that use the tree-sitter library."""
def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
try:
import tree_sitter # noqa: F401
import tree_sitter_languages # noqa: F401
except ImportError:
raise ImportError(
"Could not import tree_sitter/tree_sitter_languages Python packages. "
"Please install them with "
"`pip install tree-sitter tree-sitter-languages`."
)
def is_valid(self) -> bool:
language = self.get_language()
error_query = language.query("(ERROR) @error")
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
return len(error_query.captures(tree.root_node)) == 0
def extract_functions_classes(self) -> List[str]:
language = self.get_language()
query = language.query(self.get_chunk_query())
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
captures = query.captures(tree.root_node)
processed_lines = set()
chunks = []
for node, name in captures:
start_line = node.start_point[0]
end_line = node.end_point[0]
lines = list(range(start_line, end_line + 1))
if any(line in processed_lines for line in lines):
continue
processed_lines.update(lines)
chunk_text = node.text.decode("UTF-8")
chunks.append(chunk_text)
return chunks
def simplify_code(self) -> str:
language = self.get_language()
query = language.query(self.get_chunk_query())
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
processed_lines = set()
simplified_lines = self.source_lines[:]
for node, name in query.captures(tree.root_node):
start_line = node.start_point[0]
end_line = node.end_point[0]
lines = list(range(start_line, end_line + 1))
if any(line in processed_lines for line in lines):
continue
simplified_lines[start_line] = self.make_line_comment(
f"Code for: {self.source_lines[start_line]}"
)
for line_num in range(start_line + 1, end_line + 1):
simplified_lines[line_num] = None # type: ignore
processed_lines.update(lines)
return "\n".join(line for line in simplified_lines if line is not None)
def get_parser(self) -> "Parser":
from tree_sitter import Parser
parser = Parser()
parser.set_language(self.get_language())
return parser
@abstractmethod
def get_language(self) -> "Language":
raise NotImplementedError() # pragma: no cover
@abstractmethod
def get_chunk_query(self) -> str:
raise NotImplementedError() # pragma: no cover
@abstractmethod
def make_line_comment(self, text: str) -> str:
raise NotImplementedError() # pragma: no cover
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.21.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
list_datasets,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.21.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
list_datasets,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Sparsity Stats Query : Row Non-Zero Mean: 74.93406589214618, Row Sparsity Mean: 0.9975449305314285
Model Sparsity Stats Corpus : Row Non-Zero Mean: 174.8070262028621, Row Sparsity Mean: 0.9942727547425491
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: num_rows: 49.92307692307692, num_cols: 30522.0, row_non_zero_mean: 74.91560451801007, row_sparsity_mean: 0.9975455219929035
Average Corpus: num_rows: 4334.7692307692305, num_cols: 30522.0, row_non_zero_mean: 174.81000049297626, row_sparsity_mean: 0.9942726905529315
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.75%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.24%
MRR@10: 0.6821
NDCG@10: 0.6204
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
image_size = (640, 640)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(
data_preprocessor=dict(pad_size_divisor=64, batch_augments=batch_augments),
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# learning policy
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs, val_interval=2)
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True),
clip_grad=None)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
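# Hedged usage note: with mmdetection's tools/train.py, the linear scaling
# against base_batch_size above is opt-in via the --auto-scale-lr flag, e.g.
#
#     python tools/train.py <path/to/this_config.py> --auto-scale-lr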
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
image_size = (640, 640)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(
data_preprocessor=dict(pad_size_divisor=64, batch_augments=batch_augments),
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(bbox_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# learning policy
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs, val_interval=2)
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True),
clip_grad=None)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
import os.path
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .vision import VisionDataset
class CocoDetection(VisionDataset):
"""`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
def __init__(
self,
root: str,
annFile: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms, transform, target_transform)
from pycocotools.coco import COCO
self.coco = COCO(annFile)
self.ids = list(sorted(self.coco.imgs.keys()))
def _load_image(self, id: int) -> Image.Image:
path = self.coco.loadImgs(id)[0]["file_name"]
return Image.open(os.path.join(self.root, path)).convert("RGB")
def _load_target(self, id: int) -> List[Any]:
return self.coco.loadAnns(self.coco.getAnnIds(id))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
if not isinstance(index, int):
raise ValueError(f"Index must be of type integer, got {type(index)} instead.")
id = self.ids[index]
image = self._load_image(id)
target = self._load_target(id)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self) -> int:
return len(self.ids)
class CocoCaptions(CocoDetection):
"""`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.PILToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def _load_target(self, id: int) -> List[str]:
return [ann["caption"] for ann in super()._load_target(id)]
|
import os.path
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .vision import VisionDataset
class CocoDetection(VisionDataset):
"""`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
def __init__(
self,
root: str,
annFile: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms, transform, target_transform)
from pycocotools.coco import COCO
self.coco = COCO(annFile)
self.ids = list(sorted(self.coco.imgs.keys()))
def _load_image(self, id: int) -> Image.Image:
path = self.coco.loadImgs(id)[0]["file_name"]
return Image.open(os.path.join(self.root, path)).convert("RGB")
def _load_target(self, id: int) -> List[Any]:
return self.coco.loadAnns(self.coco.getAnnIds(id))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
id = self.ids[index]
image = self._load_image(id)
target = self._load_target(id)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self) -> int:
return len(self.ids)
class CocoCaptions(CocoDetection):
"""`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.PILToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def _load_target(self, id: int) -> List[str]:
return [ann["caption"] for ann in super()._load_target(id)]
|
from typing import Optional
from ..utils.logging import get_logger
from .audio_classification import AudioClassification
from .automatic_speech_recognition import AutomaticSpeechRecognition
from .base import TaskTemplate
from .image_classification import ImageClassification
from .language_modeling import LanguageModeling
from .question_answering import QuestionAnsweringExtractive
from .summarization import Summarization
from .text_classification import TextClassification
__all__ = [
"AutomaticSpeechRecognition",
"AudioClassification",
"ImageClassification",
"LanguageModeling",
"QuestionAnsweringExtractive",
"Summarization",
"TaskTemplate",
"TextClassification",
]
logger = get_logger(__name__)
NAME2TEMPLATE = {
AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
AudioClassification.task: AudioClassification,
ImageClassification.task: ImageClassification,
LanguageModeling.task: LanguageModeling,
QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
Summarization.task: Summarization,
TextClassification.task: TextClassification,
}
def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
"""Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary."""
task_name = task_template_dict.get("task")
if task_name is None:
logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
return None
template = NAME2TEMPLATE.get(task_name)
return template.from_dict(task_template_dict)
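# Hedged usage sketch (assumes TextClassification.task == "text-classification"):
#
#     template = task_template_from_dict({"task": "text-classification"})
#     assert isinstance(template, TextClassification)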
|
from typing import Optional
from ..utils.logging import get_logger
from .audio_classification import AudioClassification
from .automatic_speech_recognition import AutomaticSpeechRecognition
from .base import TaskTemplate
from .image_classification import ImageClassification
from .language_modeling import LanguageModeling
from .question_answering import QuestionAnsweringExtractive
from .summarization import Summarization
from .text_classification import TextClassification
__all__ = [
"AutomaticSpeechRecognition",
"AudioClassification",
"ImageClassification",
"LanguageModeling",
"QuestionAnsweringExtractive",
"Summarization",
"TaskTemplate",
"TextClassification",
]
logger = get_logger(__name__)
NAME2TEMPLATE = {
AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
AudioClassification.task: AudioClassification,
ImageClassification.task: ImageClassification,
LanguageModeling.task: LanguageModeling,
QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
Summarization.task: Summarization,
TextClassification.task: TextClassification,
}
def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
"""Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary."""
task_name = task_template_dict.get("task")
if task_name is None:
logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
return None
template = NAME2TEMPLATE.get(task_name)
return template.from_dict(task_template_dict)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling the tensor data of a [`PointCloud3D`][docarray.documents.point_cloud.PointCloud3D] object.
A PointsAndColors Document can contain:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the points in 3D space information (`PointsAndColors.points`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the points' color information (`PointsAndColors.colors`)
"""
points: AnyTensor
colors: Optional[AnyTensor] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
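# Hedged usage sketch: validate() promotes a bare tensor into a document, so
# a (100, 3) point array becomes `points` with `colors` left unset:
#
#     pc = PointsAndColors.validate(np.random.rand(100, 3))
#     assert pc.colors is None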
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling the tensor data of a [`PointCloud3D`][docarray.documents.point_cloud.PointCloud3D] object.
A PointsAndColors Document can contain:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the points in 3D space information (`PointsAndColors.points`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the points' color information (`PointsAndColors.colors`)
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
|
import inspect
import threading
from typing import Any, Awaitable, Callable, ParamSpec, TypeVar, cast, overload
P = ParamSpec("P")
R = TypeVar("R")
@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
# A single module-level thread-local shared by all decorated functions; cache
# keys embed the function itself, so entries cannot collide across functions
# and clear_thread_cache() can actually reach the cache.
_thread_local = threading.local()
def thread_cached(
    func: Callable[P, R] | Callable[P, Awaitable[R]],
) -> Callable[P, R] | Callable[P, Awaitable[R]]:
    if inspect.iscoroutinefunction(func):
        async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            cache = getattr(_thread_local, "cache", None)
            if cache is None:
                cache = _thread_local.cache = {}
            # Include the function in the key to prevent collisions
            # between different decorated functions
            key = (func, args, tuple(sorted(kwargs.items())))
            if key not in cache:
                cache[key] = await cast(Callable[P, Awaitable[R]], func)(
                    *args, **kwargs
                )
            return cache[key]
        return async_wrapper
    else:
        def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            cache = getattr(_thread_local, "cache", None)
            if cache is None:
                cache = _thread_local.cache = {}
            # Include the function in the key to prevent collisions
            # between different decorated functions
            key = (func, args, tuple(sorted(kwargs.items())))
            if key not in cache:
                cache[key] = func(*args, **kwargs)
            return cache[key]
        return sync_wrapper
def clear_thread_cache(func: Callable[..., Any]) -> None:
    """Clear the current thread's cached entries for a thread-cached function."""
    cache = getattr(_thread_local, "cache", None)
    if cache is None:
        return
    # Drop every entry whose key was produced for `func`
    for key in list(cache.keys()):
        if key and key[0] == func:
            del cache[key]
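# Hedged usage sketch: within a single thread, repeated calls with identical
# arguments are served from the per-thread cache.
#
#     @thread_cached
#     def square(x: int) -> int:
#         return x * x
#
#     square(3); square(3)        # second call hits the cache
#     clear_thread_cache(square)  # evicts this thread's entries for `square`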
|
import inspect
import threading
from typing import Any, Awaitable, Callable, ParamSpec, TypeVar, cast, overload
P = ParamSpec("P")
R = TypeVar("R")
@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
def thread_cached(
func: Callable[P, R] | Callable[P, Awaitable[R]],
) -> Callable[P, R] | Callable[P, Awaitable[R]]:
thread_local = threading.local()
if inspect.iscoroutinefunction(func):
async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (func, args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = await cast(Callable[P, Awaitable[R]], func)(
*args, **kwargs
)
return cache[key]
        async_wrapper._thread_local = thread_local  # expose cache storage for clear_thread_cache
        return async_wrapper
else:
def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
# Include function in the key to prevent collisions between different functions
key = (func, args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
        sync_wrapper._thread_local = thread_local  # expose cache storage for clear_thread_cache
        return sync_wrapper
def clear_thread_cache(func: Callable[..., Any]) -> None:
    """Clear the current thread's cache for a thread-cached function."""
    # A fresh threading.local() would never hold any entries; read the storage
    # that the decorator attached to the wrapper instead.
    thread_local = getattr(func, "_thread_local", None)
    if thread_local is None:
        return
    cache = getattr(thread_local, "cache", None)
    if cache is not None:
        cache.clear()
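# A brief usage sketch (an illustration, assuming the decorator above): results
# are memoized per thread, so a repeated call with the same arguments in the
# same thread hits the cache, while a fresh thread recomputes.
if __name__ == "__main__":
    call_count = 0

    @thread_cached
    def square(x: int) -> int:
        global call_count
        call_count += 1
        return x * x

    square(3)
    square(3)  # same thread, same args -> served from cache
    assert call_count == 1

    t = threading.Thread(target=lambda: square(3))  # new thread, new cache
    t.start()
    t.join()
    assert call_count == 2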
|
import torch
from parameterized import parameterized
from torchaudio.prototype.models import conformer_wav2vec2_base, conformer_wav2vec2_pretrain_base, emformer_hubert_base
from torchaudio_unittest.common_utils import nested_params, skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSSLModel(TorchaudioTestCase):
def _smoke_test(self, model, feature_dim, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim, device=device, dtype=dtype)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
device=device,
)
model(features, lengths)
@nested_params(
[(conformer_wav2vec2_base, 64), (conformer_wav2vec2_pretrain_base, 64), (emformer_hubert_base, 80)],
[torch.float32, torch.float64],
)
def test_cpu_smoke_test(self, model_feature_dim, dtype):
model, feature_dim = model_feature_dim
model = model()
self._smoke_test(model, feature_dim, torch.device("cpu"), dtype)
@nested_params(
[(conformer_wav2vec2_base, 64), (conformer_wav2vec2_pretrain_base, 64), (emformer_hubert_base, 80)],
[torch.float32, torch.float64],
)
@skipIfNoCuda
def test_cuda_smoke_test(self, model_feature_dim, dtype):
model, feature_dim = model_feature_dim
model = model()
self._smoke_test(model, feature_dim, torch.device("cuda"), dtype)
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_extract_feature(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
if feature_dim == 64:
num_layers = len(model.encoder.conformer)
else:
num_layers = len(model.encoder.emformer.emformer_layers)
features = torch.randn(batch_size, num_frames, feature_dim)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
all_features, lengths_ = model.extract_features(features, lengths, num_layers=None)
assert len(all_features) == num_layers
for feats in all_features:
assert feats.ndim == 3
assert feats.shape[0] == batch_size
assert lengths_.shape == torch.Size([batch_size])
for l in range(1, num_layers + 1):
feats, lengths_ = model.extract_features(features, lengths, num_layers=l)
assert len(feats) == l
for i in range(l):
self.assertEqual(all_features[i], feats[i])
assert lengths_.shape == torch.Size([batch_size])
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_zero_length(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim)
input_lengths = torch.zeros(batch_size)
_, output_lengths = model(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
_, output_lengths = model.extract_features(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_torchscript_consistency(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
ref_out, ref_len = model(features, lengths)
scripted = torch_script(model)
hyp_out, hyp_len = scripted(features, lengths)
self.assertEqual(hyp_out, ref_out)
self.assertEqual(hyp_len, ref_len)
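# A standalone usage sketch (an illustration, not part of the test suite): the
# prototype factories take no required arguments and map (batch, frames,
# feature_dim) features plus per-example lengths to (output, output_lengths).
if __name__ == "__main__":
    model = conformer_wav2vec2_base().eval()
    feats = torch.randn(2, 400, 64)  # feature_dim is 64 for this model
    lens = torch.tensor([400, 320])
    with torch.no_grad():
        out, out_lens = model(feats, lens)
    print(out.shape, out_lens)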
|
import torch
from parameterized import parameterized
from torchaudio.prototype.models import conformer_wav2vec2_base, emformer_hubert_base
from torchaudio_unittest.common_utils import nested_params, skipIfNoCuda, torch_script, TorchaudioTestCase
class TestSSLModel(TorchaudioTestCase):
def _smoke_test(self, model, feature_dim, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim, device=device, dtype=dtype)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
device=device,
)
model(features, lengths)
@nested_params(
[(conformer_wav2vec2_base, 64), (emformer_hubert_base, 80)],
[torch.float32, torch.float64],
)
def test_cpu_smoke_test(self, model_feature_dim, dtype):
model, feature_dim = model_feature_dim
model = model()
self._smoke_test(model, feature_dim, torch.device("cpu"), dtype)
@nested_params(
[(conformer_wav2vec2_base, 64), (emformer_hubert_base, 80)],
[torch.float32, torch.float64],
)
@skipIfNoCuda
def test_cuda_smoke_test(self, model_feature_dim, dtype):
model, feature_dim = model_feature_dim
model = model()
self._smoke_test(model, feature_dim, torch.device("cuda"), dtype)
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_extract_feature(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
if feature_dim == 64:
num_layers = len(model.encoder.conformer)
else:
num_layers = len(model.encoder.emformer.emformer_layers)
features = torch.randn(batch_size, num_frames, feature_dim)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
all_features, lengths_ = model.extract_features(features, lengths, num_layers=None)
assert len(all_features) == num_layers
for feats in all_features:
assert feats.ndim == 3
assert feats.shape[0] == batch_size
assert lengths_.shape == torch.Size([batch_size])
for l in range(1, num_layers + 1):
feats, lengths_ = model.extract_features(features, lengths, num_layers=l)
assert len(feats) == l
for i in range(l):
self.assertEqual(all_features[i], feats[i])
assert lengths_.shape == torch.Size([batch_size])
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_zero_length(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim)
input_lengths = torch.zeros(batch_size)
_, output_lengths = model(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
_, output_lengths = model.extract_features(features, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
@parameterized.expand(
[
(conformer_wav2vec2_base, 64, None),
(emformer_hubert_base, 80, None),
(emformer_hubert_base, 80, 512),
]
)
def test_torchscript_consistency(self, model, feature_dim, aux_num_out):
if aux_num_out is not None:
model = model(aux_num_out=aux_num_out)
else:
model = model()
model.eval()
batch_size, num_frames = 3, 1024
features = torch.randn(batch_size, num_frames, feature_dim)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
ref_out, ref_len = model(features, lengths)
scripted = torch_script(model)
hyp_out, hyp_len = scripted(features, lengths)
self.assertEqual(hyp_out, ref_out)
self.assertEqual(hyp_len, ref_len)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line("markers", "big_accelerator: marks tests as requiring big accelerator resources")
def pytest_addoption(parser):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
|
import os
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.tools import tool
from pydantic import BaseModel
from langchain_community.chat_models import MiniMaxChat
def test_chat_minimax_not_group_id() -> None:
if "MINIMAX_GROUP_ID" in os.environ:
del os.environ["MINIMAX_GROUP_ID"]
chat = MiniMaxChat() # type: ignore[call-arg]
response = chat.invoke("你好呀")
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_chat_minimax_with_stream() -> None:
chat = MiniMaxChat() # type: ignore[call-arg]
for chunk in chat.stream("你好呀"):
assert isinstance(chunk, AIMessage)
assert isinstance(chunk.content, str)
@tool
def add(a: int, b: int) -> int:
"""Adds a and b."""
return a + b
@tool
def multiply(a: int, b: int) -> int:
"""Multiplies a and b."""
return a * b
def test_chat_minimax_with_tool() -> None:
"""Test MinimaxChat with bind tools."""
chat = MiniMaxChat() # type: ignore[call-arg]
tools = [add, multiply]
chat_with_tools = chat.bind_tools(tools)
query = "What is 3 * 12?"
messages = [HumanMessage(query)]
ai_msg = chat_with_tools.invoke(messages)
assert isinstance(ai_msg, AIMessage)
assert isinstance(ai_msg.tool_calls, list)
assert len(ai_msg.tool_calls) == 1
tool_call = ai_msg.tool_calls[0]
assert "args" in tool_call
messages.append(ai_msg) # type: ignore[arg-type]
for tool_call in ai_msg.tool_calls:
selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
tool_output = selected_tool.invoke(tool_call["args"])
messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"])) # type: ignore[arg-type]
response = chat_with_tools.invoke(messages)
assert isinstance(response, AIMessage)
class AnswerWithJustification(BaseModel):
"""An answer to the user question along with justification for the answer."""
answer: str
justification: str
def test_chat_minimax_with_structured_output() -> None:
"""Test MiniMaxChat with structured output."""
llm = MiniMaxChat() # type: ignore[call-arg]
structured_llm = llm.with_structured_output(AnswerWithJustification)
response = structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
assert isinstance(response, AnswerWithJustification)
def test_chat_minimax_with_structured_output_include_raw() -> None:
    """Test MiniMaxChat with structured output (include_raw=True)."""
llm = MiniMaxChat() # type: ignore[call-arg]
structured_llm = llm.with_structured_output(
AnswerWithJustification, include_raw=True
)
response = structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
assert isinstance(response, dict)
assert isinstance(response.get("raw"), AIMessage)
assert isinstance(response.get("parsed"), AnswerWithJustification)
|
import os
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.tools import tool
from pydantic import BaseModel
from langchain_community.chat_models import MiniMaxChat
def test_chat_minimax_not_group_id() -> None:
if "MINIMAX_GROUP_ID" in os.environ:
del os.environ["MINIMAX_GROUP_ID"]
chat = MiniMaxChat() # type: ignore[call-arg]
response = chat.invoke("你好呀")
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_chat_minimax_with_stream() -> None:
chat = MiniMaxChat() # type: ignore[call-arg]
for chunk in chat.stream("你好呀"):
assert isinstance(chunk, AIMessage)
assert isinstance(chunk.content, str)
@tool
def add(a: int, b: int) -> int:
"""Adds a and b."""
return a + b
@tool
def multiply(a: int, b: int) -> int:
"""Multiplies a and b."""
return a * b
def test_chat_minimax_with_tool() -> None:
"""Test MinimaxChat with bind tools."""
chat = MiniMaxChat() # type: ignore[call-arg]
tools = [add, multiply]
chat_with_tools = chat.bind_tools(tools)
query = "What is 3 * 12?"
messages = [HumanMessage(query)]
ai_msg = chat_with_tools.invoke(messages)
assert isinstance(ai_msg, AIMessage)
assert isinstance(ai_msg.tool_calls, list)
assert len(ai_msg.tool_calls) == 1
tool_call = ai_msg.tool_calls[0]
assert "args" in tool_call
messages.append(ai_msg) # type: ignore[arg-type]
for tool_call in ai_msg.tool_calls:
selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
tool_output = selected_tool.invoke(tool_call["args"]) # type: ignore[attr-defined]
messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"])) # type: ignore[arg-type]
response = chat_with_tools.invoke(messages)
assert isinstance(response, AIMessage)
class AnswerWithJustification(BaseModel):
"""An answer to the user question along with justification for the answer."""
answer: str
justification: str
def test_chat_minimax_with_structured_output() -> None:
"""Test MiniMaxChat with structured output."""
llm = MiniMaxChat() # type: ignore[call-arg]
structured_llm = llm.with_structured_output(AnswerWithJustification)
response = structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
assert isinstance(response, AnswerWithJustification)
def test_chat_minimax_with_structured_output_include_raw() -> None:
    """Test MiniMaxChat with structured output (include_raw=True)."""
llm = MiniMaxChat() # type: ignore[call-arg]
structured_llm = llm.with_structured_output(
AnswerWithJustification, include_raw=True
)
response = structured_llm.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
assert isinstance(response, dict)
assert isinstance(response.get("raw"), AIMessage)
assert isinstance(response.get("parsed"), AnswerWithJustification)
|
_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py'
# learning policy
max_epochs = 28
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[24, 27],
gamma=0.1)
]
|
_base_ = '../htc/htc_x101_64x4d_fpn_16x1_20e_coco.py'
# learning policy
lr_config = dict(step=[24, 27])
runner = dict(type='EpochBasedRunner', max_epochs=28)
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while maintaining nearly the same performance.
For dimensionality reduction, we compute embeddings for a large set of (representative) sentence. Then,
we use PCA to find e.g. 128 principle components of our vector space. This allows us to maintain
us much information as possible with only 128 dimensions.
PCA gives us a matrix that down-projects vectors to 128 dimensions. We use this matrix
and extend our original SentenceTransformer model with this linear downproject. Hence,
the new SentenceTransformer model will produce directly embeddings with 128 dimensions
without further changes needed.
"""
import logging
import random
import numpy as np
import torch
from sklearn.decomposition import PCA
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Model for which we apply dimensionality reduction
model_name = "all-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
# New size for the embeddings
new_dimension = 128
# We measure the performance of the original model
# and later we will measure the performance with the reduced dimension size
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
stsb_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
name="sts-test",
)
logging.info("Original model performance:")
stsb_evaluator(model)
######## Reduce the embedding dimensions ########
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-score", split="train")
nli_sentences = train_dataset["sentence1"] + train_dataset["sentence2"]
random.shuffle(nli_sentences)
# To determine the PCA matrix, we need some example sentence embeddings.
# Here, we compute the embeddings for 20k random sentences from the AllNLI dataset
pca_train_sentences = nli_sentences[0:20000]
train_embeddings = model.encode(pca_train_sentences, convert_to_numpy=True)
# Compute PCA on the train embeddings matrix
pca = PCA(n_components=new_dimension)
pca.fit(train_embeddings)
pca_comp = np.asarray(pca.components_)
# We add a dense layer to the model, so that it will produce directly embeddings with the new size
dense = models.Dense(
in_features=model.get_sentence_embedding_dimension(),
out_features=new_dimension,
bias=False,
activation_function=torch.nn.Identity(),
)
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca_comp))
model.add_module("dense", dense)
# Evaluate the model with the reduced embedding size
logging.info("Model with {} dimensions:".format(new_dimension))
stsb_evaluator(model)
# If you like, you can store the model on disk like this:
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
model.save(f"{model_name}-128dim")
# You can then load the adapted model that produces 128 dimensional embeddings like this:
# model = SentenceTransformer('models/my-128dim-model')
# Or you can push the model to the Hugging Face Hub
# model.push_to_hub(f'{model_name}-128dim')
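# Quick sanity check (a sketch; assumes the save above completed): the adapted
# model should now emit embeddings with the reduced dimensionality.
loaded = SentenceTransformer(f"{model_name}-128dim")
assert loaded.encode("A quick check sentence").shape[-1] == new_dimension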
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while maintaining nearly the same performance.
For dimensionality reduction, we compute embeddings for a large set of (representative) sentence. Then,
we use PCA to find e.g. 128 principle components of our vector space. This allows us to maintain
us much information as possible with only 128 dimensions.
PCA gives us a matrix that down-projects vectors to 128 dimensions. We use this matrix
and extend our original SentenceTransformer model with this linear downproject. Hence,
the new SentenceTransformer model will produce directly embeddings with 128 dimensions
without further changes needed.
"""
from sklearn.decomposition import PCA
from sentence_transformers import SentenceTransformer, LoggingHandler, util, evaluation, models, InputExample
import logging
import os
import gzip
import csv
import random
import numpy as np
import torch
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Model for which we apply dimensionality reduction
model = SentenceTransformer("all-MiniLM-L6-v2")
# New size for the embeddings
new_dimension = 128
# We use AllNLI as a source of sentences to compute PCA
nli_dataset_path = "datasets/AllNLI.tsv.gz"
# We use the STS benchmark dataset to see how much performance we lose by using the dimensionality reduction
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# We measure the performance of the original model
# and later we will measure the performance with the reduced dimension size
logger.info("Read STSbenchmark test dataset")
eval_examples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "test":
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
eval_examples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
# Evaluate the original model on the STS benchmark dataset
stsb_evaluator = evaluation.EmbeddingSimilarityEvaluator.from_input_examples(eval_examples, name="sts-benchmark-test")
logger.info("Original model performance:")
stsb_evaluator(model)
######## Reduce the embedding dimensions ########
# Read sentences from NLI dataset
nli_sentences = set()
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
nli_sentences.add(row["sentence1"])
nli_sentences.add(row["sentence2"])
nli_sentences = list(nli_sentences)
random.shuffle(nli_sentences)
# To determine the PCA matrix, we need some example sentence embeddings.
# Here, we compute the embeddings for 20k random sentences from the AllNLI dataset
pca_train_sentences = nli_sentences[0:20000]
train_embeddings = model.encode(pca_train_sentences, convert_to_numpy=True)
# Compute PCA on the train embeddings matrix
pca = PCA(n_components=new_dimension)
pca.fit(train_embeddings)
pca_comp = np.asarray(pca.components_)
# We add a dense layer to the model, so that it will produce directly embeddings with the new size
dense = models.Dense(
in_features=model.get_sentence_embedding_dimension(),
out_features=new_dimension,
bias=False,
activation_function=torch.nn.Identity(),
)
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca_comp))
model.add_module("dense", dense)
# Evaluate the model with the reduced embedding size
logger.info("Model with {} dimensions:".format(new_dimension))
stsb_evaluator(model)
# If you like, you can store the model on disk by uncommenting the following line
# model.save('models/my-128dim-model')
# You can then load the adapted model that produces 128 dimensional embeddings like this:
# model = SentenceTransformer('models/my-128dim-model')
|
"""Utils for OpenAI agent."""
from typing import List, Union
from llama_index.core.tools import BaseTool
def get_function_by_name(tools: List[BaseTool], name: str) -> BaseTool:
"""Get function by name."""
name_to_tool = {tool.metadata.name: tool for tool in tools}
if name not in name_to_tool:
raise ValueError(f"Tool with name {name} not found")
return name_to_tool[name]
def resolve_tool_choice(tool_choice: Union[str, dict] = "auto") -> Union[str, dict]:
"""
Resolve tool choice.
If tool_choice is a function name string, return the appropriate dict.
"""
if isinstance(tool_choice, str) and tool_choice not in ["none", "auto"]:
return {"type": "function", "function": {"name": tool_choice}}
return tool_choice
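# A quick usage sketch (illustrative): bare function names are wrapped into the
# OpenAI tool_choice dict, while "none"/"auto" and dicts pass through unchanged.
if __name__ == "__main__":
    assert resolve_tool_choice("auto") == "auto"
    assert resolve_tool_choice("multiply") == {
        "type": "function",
        "function": {"name": "multiply"},
    }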
|
"""Utils for OpenAI agent."""
from typing import List, Union
from llama_index.core.tools import BaseTool
def get_function_by_name(tools: List[BaseTool], name: str) -> BaseTool:
"""Get function by name."""
name_to_tool = {tool.metadata.name: tool for tool in tools}
if name not in name_to_tool:
raise ValueError(f"Tool with name {name} not found")
return name_to_tool[name]
def resolve_tool_choice(tool_choice: Union[str, dict] = "auto") -> Union[str, dict]:
"""Resolve tool choice.
If tool_choice is a function name string, return the appropriate dict.
"""
if isinstance(tool_choice, str) and tool_choice not in ["none", "auto"]:
return {"type": "function", "function": {"name": tool_choice}}
return tool_choice
|
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[Image]
class Middle(BaseDocument):
img: Optional[Image]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[Image]
middle: Optional[Middle]
da: DocumentArray[Inner]
doc = Outer(
img=Image(),
middle=Middle(img=Image(), inner=Inner(img=Image())),
da=DocumentArray[Inner]([Inner(img=Image(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: Image
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
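def test_access_paths_round_trip():
    # A small added consistency check (sketch): flattening to access paths and
    # rebuilding the nested dict are inverse operations.
    nested = {'a': {'b': 1, 'c': {'d': 2}}}
    assert _access_path_dict_to_nested_dict(_dict_to_access_paths(nested)) == nested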
|
from typing import Optional
import pytest
from docarray import BaseDocument
from docarray.documents import Image
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[Image]
class Middle(BaseDocument):
img: Optional[Image]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[Image]
middle: Optional[Middle]
doc = Outer(img=Image(), middle=Middle(img=Image(), inner=Inner(img=Image())))
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: Image
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640)
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
    # In order to align with the source code, the threshold of the val phase is
# 0.01, and the threshold of the test phase is 0.001.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
    # According to the official implementation, multi-scale
    # training is not handled here, but in
    # 'mmdet/models/detectors/yolox.py'.
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
# If the image is three-channel, the pad value needs
# to be set separately for each channel.
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
persistent_workers=True,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
max_epochs = 300
num_last_epochs = 15
resume_from = None
interval = 10
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=num_last_epochs,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=max_epochs)
custom_hooks = [
dict(
type='YOLOXModeSwitchHook',
num_last_epochs=num_last_epochs,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=num_last_epochs,
interval=interval,
priority=48),
dict(
type='ExpMomentumEMAHook',
resume_from=resume_from,
momentum=0.0001,
priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(
save_best='auto',
    # The evaluation interval is 'interval' when the running epoch is
    # less than 'max_epochs - num_last_epochs'.
    # The evaluation interval is 1 when the running epoch is greater than
    # or equal to 'max_epochs - num_last_epochs'.
interval=interval,
dynamic_intervals=[(max_epochs - num_last_epochs, 1)],
metric='bbox')
log_config = dict(interval=50)
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
# model settings
model = dict(
type='YOLOX',
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
    # In order to align with the source code, the threshold of the val phase is
# 0.01, and the threshold of the test phase is 0.001.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline,
dynamic_scale=img_scale)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size=img_scale, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=15,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=300)
resume_from = None
interval = 10
custom_hooks = [
dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
dict(
type='SyncRandomSizeHook',
ratio_range=(14, 26),
img_scale=img_scale,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=15,
interval=interval,
priority=48),
dict(type='ExpMomentumEMAHook', resume_from=resume_from, priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(interval=interval, metric='bbox')
log_config = dict(interval=50)
|
from enum import Enum
from fsspec import AbstractFileSystem
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Protocol, runtime_checkable
import json
import uuid
from docling.document_converter import DocumentConverter
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core import Document as LIDocument
from pydantic import Field
class DoclingReader(BasePydanticReader):
"""Docling Reader.
Extracts PDF, DOCX, and other document formats into LlamaIndex Documents as either Markdown or JSON-serialized Docling native format.
Args:
export_type (Literal["markdown", "json"], optional): The type to export to. Defaults to "markdown".
doc_converter (DocumentConverter, optional): The Docling converter to use. Default factory: `DocumentConverter`.
md_export_kwargs (Dict[str, Any], optional): Kwargs to use in case of markdown export. Defaults to `{"image_placeholder": ""}`.
        id_func (DocIDGenCallable, optional): Doc ID generation function to use. Default: `_uuid4_doc_id_gen`
"""
class ExportType(str, Enum):
MARKDOWN = "markdown"
JSON = "json"
@runtime_checkable
class DocIDGenCallable(Protocol):
def __call__(self, doc: DLDocument, file_path: str | Path) -> str:
...
@staticmethod
def _uuid4_doc_id_gen(doc: DLDocument, file_path: str | Path) -> str:
return str(uuid.uuid4())
export_type: ExportType = ExportType.MARKDOWN
doc_converter: DocumentConverter = Field(default_factory=DocumentConverter)
md_export_kwargs: Dict[str, Any] = {"image_placeholder": ""}
id_func: DocIDGenCallable = _uuid4_doc_id_gen
def lazy_load_data(
self,
file_path: str | Path | Iterable[str] | Iterable[Path],
extra_info: dict | None = None,
fs: Optional[AbstractFileSystem] = None,
) -> Iterable[LIDocument]:
"""Lazily load from given source.
Args:
file_path (str | Path | Iterable[str] | Iterable[Path]): Document file source as single str (URL or local file) or pathlib.Path — or iterable thereof
extra_info (dict | None, optional): Any pre-existing metadata to include. Defaults to None.
Returns:
Iterable[LIDocument]: Iterable over the created LlamaIndex documents.
"""
file_paths = (
file_path
if isinstance(file_path, Iterable) and not isinstance(file_path, str)
else [file_path]
)
for source in file_paths:
dl_doc = self.doc_converter.convert(source).document
text: str
if self.export_type == self.ExportType.MARKDOWN:
text = dl_doc.export_to_markdown(**self.md_export_kwargs)
elif self.export_type == self.ExportType.JSON:
text = json.dumps(dl_doc.export_to_dict())
else:
raise ValueError(f"Unexpected export type: {self.export_type}")
li_doc = LIDocument(
doc_id=self.id_func(doc=dl_doc, file_path=source),
text=text,
)
li_doc.metadata = extra_info or {}
yield li_doc
|
from enum import Enum
from fsspec import AbstractFileSystem
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Protocol, runtime_checkable
import json
import uuid
from docling.document_converter import DocumentConverter
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core import Document as LIDocument
from pydantic import Field
class DoclingReader(BasePydanticReader):
"""Docling Reader.
Extracts PDF, DOCX, and other document formats into LlamaIndex Documents as either Markdown or JSON-serialized Docling native format.
Args:
export_type (Literal["markdown", "json"], optional): The type to export to. Defaults to "markdown".
doc_converter (DocumentConverter, optional): The Docling converter to use. Default factory: `DocumentConverter`.
md_export_kwargs (Dict[str, Any], optional): Kwargs to use in case of markdown export. Defaults to `{"image_placeholder": ""}`.
        id_func (DocIDGenCallable, optional): Doc ID generation function to use. Default: `_uuid4_doc_id_gen`
"""
class ExportType(str, Enum):
MARKDOWN = "markdown"
JSON = "json"
@runtime_checkable
class DocIDGenCallable(Protocol):
def __call__(self, doc: DLDocument, file_path: str | Path) -> str:
...
@staticmethod
def _uuid4_doc_id_gen(doc: DLDocument, file_path: str | Path) -> str:
return str(uuid.uuid4())
export_type: ExportType = ExportType.MARKDOWN
doc_converter: DocumentConverter = Field(default_factory=DocumentConverter)
md_export_kwargs: Dict[str, Any] = {"image_placeholder": ""}
id_func: DocIDGenCallable = _uuid4_doc_id_gen
def lazy_load_data(
self,
file_path: str | Path | Iterable[str] | Iterable[Path],
extra_info: dict | None = None,
fs: Optional[AbstractFileSystem] = None,
) -> Iterable[LIDocument]:
"""Lazily load from given source.
Args:
file_path (str | Path | Iterable[str] | Iterable[Path]): Document file source as single str (URL or local file) or pathlib.Path — or iterable thereof
extra_info (dict | None, optional): Any pre-existing metadata to include. Defaults to None.
Returns:
Iterable[LIDocument]: Iterable over the created LlamaIndex documents.
"""
file_paths = (
file_path
if isinstance(file_path, Iterable) and not isinstance(file_path, str)
else [file_path]
)
for source in file_paths:
dl_doc = self.doc_converter.convert(str(source)).document
text: str
if self.export_type == self.ExportType.MARKDOWN:
text = dl_doc.export_to_markdown(**self.md_export_kwargs)
elif self.export_type == self.ExportType.JSON:
text = json.dumps(dl_doc.export_to_dict())
else:
raise ValueError(f"Unexpected export type: {self.export_type}")
li_doc = LIDocument(
doc_id=self.id_func(doc=dl_doc, file_path=source),
text=text,
)
li_doc.metadata = extra_info or {}
yield li_doc
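# A hypothetical usage sketch ("report.pdf" is a placeholder path):
if __name__ == "__main__":
    reader = DoclingReader()  # defaults to Markdown export
    for doc in reader.lazy_load_data("report.pdf"):
        print(doc.doc_id, doc.text[:100])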
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
            '--options and --cfg-options cannot both be '
            'specified; --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
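# A small sketch of the same override mechanics, callable directly; any config
# path passed in is hypothetical and `options` uses mmcv's dotted-key form:
def print_config_with_overrides(config_path, options):
    cfg = Config.fromfile(config_path)
    cfg.merge_from_dict(options)  # e.g. {'data.samples_per_gpu': 4}
    print(f'Config:\n{cfg.pretty_text}')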
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
from mmdet.utils import update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
            '--options and --cfg-options cannot both be '
            'specified; --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import VGG
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..necks import ssd_neck
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
"""VGG Backbone network for single-shot-detection.
Args:
depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            stage of the model.
ceil_mode (bool): When True, will use `ceil` instead of `floor`
to compute the output shape.
out_indices (Sequence[int]): Output from which stages.
out_feature_indices (Sequence[int]): Output from which feature map.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
        input_size (int, optional): Deprecated argument.
Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
L2 normalization layer init scale.
Example:
        >>> self = SSDVGG(depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
pretrained=None,
init_cfg=None,
input_size=None,
l2_norm_scale=None):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if init_cfg is not None:
self.init_cfg = init_cfg
elif isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
dict(type='Normal', std=0.01, layer='Linear'),
]
else:
raise TypeError('pretrained must be a str or None')
if input_size is not None:
warnings.warn('DeprecationWarning: input_size is deprecated')
if l2_norm_scale is not None:
warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
'deprecated, it has been moved to SSDNeck.')
def init_weights(self, pretrained=None):
super(VGG, self).init_weights()
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
def __init__(self, **kwargs):
super(L2Norm, self).__init__(**kwargs)
warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
'is deprecated, please use L2Norm in '
'mmdet/models/necks/ssd_neck.py instead')
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.cnn import VGG
from mmcv.runner import BaseModule
from ..builder import BACKBONES
from ..necks import ssd_neck
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
"""VGG Backbone network for single-shot-detection.
Args:
depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            stage of the model.
ceil_mode (bool): When True, will use `ceil` instead of `floor`
to compute the output shape.
out_indices (Sequence[int]): Output from which stages.
out_feature_indices (Sequence[int]): Output from which feature map.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
        input_size (int, optional): Deprecated argument.
Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
L2 normalization layer init scale.
Example:
        >>> self = SSDVGG(depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
pretrained=None,
init_cfg=None,
input_size=None,
l2_norm_scale=None):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
if init_cfg is not None:
self.init_cfg = init_cfg
elif isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
dict(type='Normal', std=0.01, layer='Linear'),
]
else:
raise TypeError('pretrained must be a str or None')
if input_size is not None:
warnings.warn('DeprecationWarning: input_size is deprecated')
if l2_norm_scale is not None:
warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '
'deprecated, it has been moved to SSDNeck.')
def init_weights(self, pretrained=None):
super(VGG, self).init_weights()
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
class L2Norm(ssd_neck.L2Norm):
def __init__(self, **kwargs):
super(L2Norm, self).__init__(**kwargs)
warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '
'is deprecated, please use L2Norm in '
'mmdet/models/necks/ssd_neck.py instead')
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
'redis': '"docarray[redis]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
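# Hedged usage sketch for the helpers above; `lz4` is just one example key
# from INSTALL_INSTRUCTIONS and may or may not be installed.
if __name__ == '__main__':
    lz4 = import_library('lz4', raise_error=False)  # returns None instead of raising
    if lz4 is None:
        print('lz4 is not installed')
    print(is_np_int(np.int64(3)))    # True: zero-dim numpy integer
    print(is_np_int(np.array([3])))  # False: ndim == 1
    print(is_notebook())             # False outside Jupyter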
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it
def is_notebook() -> bool:
"""
    Check if we're running in a Jupyter notebook, using the magic command
    `get_ipython` that is only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py'
model = dict(bbox_head=dict(transform_method='partial_minmax'))
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='partial_minmax'))
|
import warnings
from typing import TYPE_CHECKING, List, Optional, Tuple, TypeVar
from docarray.typing import ImageBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image import ImageNdArray
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import IMAGE_MIMETYPE
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from PIL import Image as PILImage
T = TypeVar('T', bound='ImageUrl')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to an image file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def mime_type(cls) -> str:
return IMAGE_MIMETYPE
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return []
def load_pil(self, timeout: Optional[float] = None) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
from docarray.typing.bytes.image_bytes import ImageBytes
return ImageBytes(self.load_bytes(timeout=timeout)).load_pil()
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> ImageNdArray:
"""
Load the data from the url into an [`ImageNdArray`][docarray.typing.ImageNdArray]
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl, ImageNdArray
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: [`ImageNdArray`][docarray.typing.ImageNdArray] representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def load_bytes(self, timeout: Optional[float] = None) -> ImageBytes:
"""
Convert url to [`ImageBytes`][docarray.typing.ImageBytes]. This will either load or
download the file and save it into an [`ImageBytes`][docarray.typing.ImageBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`ImageBytes`][docarray.typing.ImageBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return ImageBytes(bytes_)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
            remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Optional, Tuple, TypeVar
from docarray.typing import ImageBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image import ImageNdArray
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from PIL import Image as PILImage
T = TypeVar('T', bound='ImageUrl')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to an image file.
Can be remote (web) URL, or a local file path.
"""
def load_pil(self, timeout: Optional[float] = None) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
from docarray.typing.bytes.image_bytes import ImageBytes
return ImageBytes(self.load_bytes(timeout=timeout)).load_pil()
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> ImageNdArray:
"""
Load the data from the url into an [`ImageNdArray`][docarray.typing.ImageNdArray]
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl, ImageNdArray
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, ImageNdArray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: [`ImageNdArray`][docarray.typing.ImageNdArray] representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def load_bytes(self, timeout: Optional[float] = None) -> ImageBytes:
"""
Convert url to [`ImageBytes`][docarray.typing.ImageBytes]. This will either load or
download the file and save it into an [`ImageBytes`][docarray.typing.ImageBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`ImageBytes`][docarray.typing.ImageBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return ImageBytes(bytes_)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
            remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
"""Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import ChatOpenAI
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "stream_usage": True}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="o1-mini", stream_usage=True, temperature=1)
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
@property
def supports_pdf_inputs(self) -> bool:
# OpenAI requires a filename for PDF inputs
# For now, we test with filename in OpenAI-specific tests
return False
def test_openai_pdf_inputs(self, model: BaseChatModel) -> None:
"""Test that the model can process PDF inputs."""
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")
message = HumanMessage(
[
{"type": "text", "text": "Summarize this document:"},
{
"type": "file",
"source_type": "base64",
"mime_type": "application/pdf",
"data": pdf_data,
"metadata": {"filename": "my-pdf"}, # OpenAI requires a filename
},
]
)
_ = model.invoke([message])
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
"""Standard LangChain interface tests"""
from pathlib import Path
from typing import Literal, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import ChatOpenAI
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "stream_usage": True}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="o1-mini", stream_usage=True, temperature=1)
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.CohereRerank"
)
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Any = None
"""Cohere client to use for compressing documents."""
top_n: Optional[int] = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
if not values.get("client"):
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def rerank(
self,
documents: Sequence[Union[str, Document, dict]],
query: str,
*,
model: Optional[str] = None,
top_n: Optional[int] = -1,
max_chunks_per_doc: Optional[int] = None,
) -> list[dict[str, Any]]:
"""Returns an ordered list of documents ordered by their relevance to the provided query.
Args:
query: The query to use for reranking.
documents: A sequence of documents to rerank.
            model: The model to use for re-ranking. Defaults to self.model.
            top_n: The number of results to return. If None, returns all results.
                Defaults to self.top_n.
            max_chunks_per_doc: The maximum number of chunks derived from a document.
""" # noqa: E501
if len(documents) == 0: # to avoid empty api call
return []
docs = [
doc.page_content if isinstance(doc, Document) else doc for doc in documents
]
model = model or self.model
top_n = top_n if (top_n is None or top_n > 0) else self.top_n
results = self.client.rerank(
query=query,
documents=docs,
model=model,
top_n=top_n,
max_chunks_per_doc=max_chunks_per_doc,
)
if hasattr(results, "results"):
results = getattr(results, "results")
result_dicts = []
for res in results:
result_dicts.append(
{
"index": res.index,
"relevance_score": res.relevance_score,
}
)
return result_dicts
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
compressed = []
for res in self.rerank(documents, query):
doc = documents[res["index"]]
doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
doc_copy.metadata["relevance_score"] = res["relevance_score"]
compressed.append(doc_copy)
return compressed
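# A minimal, hedged sketch of wiring up the compressor; the API key and
# documents below are placeholders, and the call needs the `cohere` package
# plus network access.
#
#   reranker = CohereRerank(cohere_api_key="...", top_n=2)
#   docs = [
#       Document(page_content="Paris is the capital of France."),
#       Document(page_content="The Eiffel Tower is in Paris."),
#   ]
#   for doc in reranker.compress_documents(docs, query="Where is the Eiffel Tower?"):
#       print(doc.metadata["relevance_score"], doc.page_content)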
|
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.CohereRerank"
)
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Any = None
"""Cohere client to use for compressing documents."""
top_n: Optional[int] = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
if not values.get("client"):
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def rerank(
self,
documents: Sequence[Union[str, Document, dict]],
query: str,
*,
model: Optional[str] = None,
top_n: Optional[int] = -1,
max_chunks_per_doc: Optional[int] = None,
) -> list[dict[str, Any]]:
"""Returns an ordered list of documents ordered by their relevance to the provided query.
Args:
query: The query to use for reranking.
documents: A sequence of documents to rerank.
            model: The model to use for re-ranking. Defaults to self.model.
            top_n: The number of results to return. If None, returns all results.
                Defaults to self.top_n.
            max_chunks_per_doc: The maximum number of chunks derived from a document.
""" # noqa: E501
if len(documents) == 0: # to avoid empty api call
return []
docs = [
doc.page_content if isinstance(doc, Document) else doc for doc in documents
]
model = model or self.model
top_n = top_n if (top_n is None or top_n > 0) else self.top_n
results = self.client.rerank(
query=query,
documents=docs,
model=model,
top_n=top_n,
max_chunks_per_doc=max_chunks_per_doc,
)
if hasattr(results, "results"):
results = getattr(results, "results")
result_dicts = []
for res in results:
result_dicts.append(
{
"index": res.index,
"relevance_score": res.relevance_score,
}
)
return result_dicts
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
compressed = []
for res in self.rerank(documents, query):
doc = documents[res["index"]]
doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
doc_copy.metadata["relevance_score"] = res["relevance_score"]
compressed.append(doc_copy)
return compressed
|
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super().__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
base_config.pop("activity_regularizer", None)
config = {"l1": self.l1, "l2": self.l2}
return {**base_config, **config}
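# A short, hedged usage sketch: the layer is an identity on its inputs and
# only contributes an activity penalty to the model's total loss. The model
# below is illustrative, not part of this module.
#
#   import keras
#   model = keras.Sequential([
#       keras.layers.Dense(8, activation="relu"),
#       keras.layers.ActivityRegularization(l1=0.01, l2=0.01),  # shape unchanged
#   ])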
|
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super().__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {"l1": self.l1, "l2": self.l2}
return {**base_config, **config}
|
from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union
import torch
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
_SAMPLE_RATE = 16000
_SPEAKERS = [
"Aditi",
"Amy",
"Brian",
"Emma",
"Geraint",
"Ivy",
"Joanna",
"Joey",
"Justin",
"Kendra",
"Kimberly",
"Matthew",
"Nicole",
"Raveena",
"Russell",
"Salli",
]
def _load_labels(file: Path, subset: str):
"""Load transcirpt, iob, and intent labels for all utterances.
Args:
file (Path): The path to the label file.
subset (str): Subset of the dataset to use. Options: [``"train"``, ``"valid"``, ``"test"``].
Returns:
Dictionary of labels, where the key is the filename of the audio,
and the label is a Tuple of transcript, Inside–outside–beginning (IOB) label, and intention label.
"""
labels = {}
with open(file, "r") as f:
for line in f:
line = line.strip().split(" ")
index = line[0]
trans, iob_intent = " ".join(line[1:]).split("\t")
trans = " ".join(trans.split(" ")[1:-1])
iob = " ".join(iob_intent.split(" ")[1:-1])
intent = iob_intent.split(" ")[-1]
if subset in index:
labels[index] = (trans, iob, intent)
return labels
class Snips(Dataset):
"""*Snips* :cite:`coucke2018snips` dataset.
Args:
root (str or Path): Root directory where the dataset's top level directory is found.
subset (str): Subset of the dataset to use. Options: [``"train"``, ``"valid"``, ``"test"``].
speakers (List[str] or None, optional): The speaker list to include in the dataset. If ``None``,
include all speakers in the subset. (Default: ``None``)
audio_format (str, optional): The extension of the audios. Options: [``"mp3"``, ``"wav"``].
(Default: ``"mp3"``)
"""
_trans_file = "all.iob.snips.txt"
def __init__(
self,
root: Union[str, Path],
subset: str,
speakers: Optional[List[str]] = None,
audio_format: str = "mp3",
) -> None:
if subset not in ["train", "valid", "test"]:
raise ValueError('`subset` must be one of ["train", "valid", "test"].')
if audio_format not in ["mp3", "wav"]:
            raise ValueError('`audio_format` must be one of ["mp3", "wav"].')
root = Path(root)
self._path = root / "SNIPS"
self.audio_path = self._path / subset
if speakers is None:
speakers = _SPEAKERS
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
self.audio_paths = self.audio_path.glob(f"*.{audio_format}")
self.data = []
for audio_path in sorted(self.audio_paths):
audio_name = str(audio_path.name)
speaker = audio_name.split("-")[0]
if speaker in speakers:
self.data.append(audio_path)
transcript_path = self._path / self._trans_file
self.labels = _load_labels(transcript_path, subset)
def get_metadata(self, n: int) -> Tuple[str, int, str, str, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): The index of the sample to be loaded.
Returns:
Tuple of the following items:
str:
Path to audio
int:
Sample rate
str:
File name
str:
Transcription of audio
str:
Inside–outside–beginning (IOB) label of transcription
str:
Intention label of the audio.
"""
audio_path = self.data[n]
relpath = os.path.relpath(audio_path, self._path)
file_name = audio_path.with_suffix("").name
transcript, iob, intent = self.labels[file_name]
return relpath, _SAMPLE_RATE, file_name, transcript, iob, intent
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
File name
str:
Transcription of audio
str:
Inside–outside–beginning (IOB) label of transcription
str:
Intention label of the audio.
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self.data)
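# Hedged usage sketch; "path/to/root" is a placeholder and the SNIPS data
# must already exist under <root>/SNIPS for this to run.
#
#   dataset = Snips("path/to/root", subset="valid", audio_format="wav")
#   waveform, sample_rate, file_name, transcript, iob, intent = dataset[0]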
|
import os
from pathlib import Path
from typing import List, Optional, Tuple, Union
import torch
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
_SAMPLE_RATE = 16000
_SPEAKERS = [
"Aditi",
"Amy",
"Brian",
"Emma",
"Geraint",
"Ivy",
"Joanna",
"Joey",
"Justin",
"Kendra",
"Kimberly",
"Matthew",
"Nicole",
"Raveena",
"Russell",
"Salli",
]
def _load_labels(file: Path, subset: str):
"""Load transcirpt, iob, and intent labels for all utterances.
Args:
file (Path): The path to the label file.
subset (str): Subset of the dataset to use. Options: [``"train"``, ``"valid"``, ``"test"``].
Returns:
Dictionary of labels, where the key is the filename of the audio,
and the label is a Tuple of transcript, Inside–outside–beginning (IOB) label, and intention label.
"""
labels = {}
with open(file, "r") as f:
for line in f:
line = line.strip().split(" ")
index = line[0]
trans, iob_intent = " ".join(line[1:]).split("\t")
trans = " ".join(trans.split(" ")[1:-1])
iob = " ".join(iob_intent.split(" ")[1:-1])
intent = iob_intent.split(" ")[-1]
if subset in index:
labels[index] = (trans, iob, intent)
return labels
class Snips(Dataset):
"""*Snips* :cite:`coucke2018snips` dataset.
Args:
root (str or Path): Root directory where the dataset's top level directory is found.
subset (str): Subset of the dataset to use. Options: [``"train"``, ``"valid"``, ``"test"``].
speakers (List[str] or None, optional): The speaker list to include in the dataset. If ``None``,
include all speakers in the subset. (Default: ``None``)
audio_format (str, optional): The extension of the audios. Options: [``"mp3"``, ``"wav"``].
(Default: ``"mp3"``)
"""
_trans_file = "all.iob.snips.txt"
def __init__(
self,
root: Union[str, Path],
subset: str,
speakers: Optional[List[str]] = None,
audio_format: str = "mp3",
) -> None:
if subset not in ["train", "valid", "test"]:
raise ValueError('`subset` must be one of ["train", "valid", "test"].')
if audio_format not in ["mp3", "wav"]:
            raise ValueError('`audio_format` must be one of ["mp3", "wav"].')
root = Path(root)
self._path = root / "SNIPS"
self.audio_path = self._path / subset
if speakers is None:
speakers = _SPEAKERS
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found.")
self.audio_paths = self.audio_path.glob(f"*.{audio_format}")
self.data = []
for audio_path in sorted(self.audio_paths):
audio_name = str(audio_path.name)
speaker = audio_name.split("-")[0]
if speaker in speakers:
self.data.append(audio_path)
transcript_path = self._path / self._trans_file
self.labels = _load_labels(transcript_path, subset)
def get_metadata(self, n: int) -> Tuple[str, int, str, str, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): The index of the sample to be loaded.
Returns:
Tuple of the following items:
str:
Path to audio
int:
Sample rate
str:
Transcription of audio
str:
Inside–outside–beginning (IOB) label of transcription
str:
Intention label of the audio.
"""
audio_path = self.data[n]
relpath = os.path.relpath(audio_path, self._path)
file_name = audio_path.with_suffix("").name
transcript, iob, intent = self.labels[file_name]
return relpath, _SAMPLE_RATE, transcript, iob, intent
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcription of audio
str:
Inside–outside–beginning (IOB) label of transcription
str:
Intention label of the audio.
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self.data)
|
"""Async utils."""
import asyncio
from itertools import zip_longest
from typing import Any, Coroutine, Iterable, List, Optional, TypeVar
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
def asyncio_module(show_progress: bool = False) -> Any:
if show_progress:
from tqdm.asyncio import tqdm_asyncio
module = tqdm_asyncio
else:
module = asyncio
return module
def asyncio_run(coro: Coroutine) -> Any:
"""
Gets an existing event loop to run the coroutine.
If there is no existing event loop, creates a new one.
"""
try:
# Check if there's an existing event loop
loop = asyncio.get_event_loop()
# If we're here, there's an existing loop but it's not running
return loop.run_until_complete(coro)
    except RuntimeError:
        # If we can't get the event loop, we're likely in a different thread, or it's already running
        try:
            return asyncio.run(coro)
        except RuntimeError:
            raise RuntimeError(
                "Detected nested async. Please use nest_asyncio.apply() to allow nested event loops. "
                "Or, use async entry methods like `aquery()`, `aretriever`, `achat`, etc."
            )
def run_async_tasks(
tasks: List[Coroutine],
show_progress: bool = False,
progress_bar_desc: str = "Running async tasks",
) -> List[Any]:
"""Run a list of async tasks."""
tasks_to_execute: List[Any] = tasks
if show_progress:
try:
import nest_asyncio
from tqdm.asyncio import tqdm
# jupyter notebooks already have an event loop running
# we need to reuse it instead of creating a new one
nest_asyncio.apply()
loop = asyncio.get_event_loop()
async def _tqdm_gather() -> List[Any]:
return await tqdm.gather(*tasks_to_execute, desc=progress_bar_desc)
tqdm_outputs: List[Any] = loop.run_until_complete(_tqdm_gather())
return tqdm_outputs
        # fall back to running without tqdm on a fatal error; this
        # may occur in some environments where tqdm.asyncio
        # is not supported
except Exception:
pass
async def _gather() -> List[Any]:
return await asyncio.gather(*tasks_to_execute)
outputs: List[Any] = asyncio_run(_gather())
return outputs
def chunks(iterable: Iterable, size: int) -> Iterable:
args = [iter(iterable)] * size
return zip_longest(*args, fillvalue=None)
async def batch_gather(
tasks: List[Coroutine], batch_size: int = 10, verbose: bool = False
) -> List[Any]:
output: List[Any] = []
for task_chunk in chunks(tasks, batch_size):
task_chunk = (task for task in task_chunk if task is not None)
output_chunk = await asyncio.gather(*task_chunk)
output.extend(output_chunk)
if verbose:
print(f"Completed {len(output)} out of {len(tasks)} tasks")
return output
def get_asyncio_module(show_progress: bool = False) -> Any:
if show_progress:
from tqdm.asyncio import tqdm_asyncio
module = tqdm_asyncio
else:
module = asyncio
return module
DEFAULT_NUM_WORKERS = 4
T = TypeVar("T")
@dispatcher.span
async def run_jobs(
jobs: List[Coroutine[Any, Any, T]],
show_progress: bool = False,
workers: int = DEFAULT_NUM_WORKERS,
desc: Optional[str] = None,
) -> List[T]:
"""
Run jobs.
Args:
jobs (List[Coroutine]):
List of jobs to run.
show_progress (bool):
Whether to show progress bar.
Returns:
List[Any]:
List of results.
"""
semaphore = asyncio.Semaphore(workers)
@dispatcher.span
async def worker(job: Coroutine) -> Any:
async with semaphore:
return await job
pool_jobs = [worker(job) for job in jobs]
if show_progress:
from tqdm.asyncio import tqdm_asyncio
results = await tqdm_asyncio.gather(*pool_jobs, desc=desc)
else:
results = await asyncio.gather(*pool_jobs)
return results
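# A minimal, hedged sketch of run_jobs with a bounded worker pool; the demo
# coroutine below is illustrative only.
if __name__ == "__main__":

    async def _square(i: int) -> int:
        await asyncio.sleep(0.01)
        return i * i

    demo_jobs = [_square(i) for i in range(10)]
    # at most 2 jobs run concurrently, results keep input order
    print(asyncio_run(run_jobs(demo_jobs, workers=2)))  # [0, 1, 4, ..., 81]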
|
"""Async utils."""
import asyncio
from itertools import zip_longest
from typing import Any, Coroutine, Iterable, List, Optional, TypeVar
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
def asyncio_module(show_progress: bool = False) -> Any:
if show_progress:
from tqdm.asyncio import tqdm_asyncio
module = tqdm_asyncio
else:
module = asyncio
return module
def asyncio_run(coro: Coroutine) -> Any:
"""Gets an existing event loop to run the coroutine.
If there is no existing event loop, creates a new one.
"""
try:
# Check if there's an existing event loop
loop = asyncio.get_event_loop()
# If we're here, there's an existing loop but it's not running
return loop.run_until_complete(coro)
    except RuntimeError:
        # If we can't get the event loop, we're likely in a different thread, or it's already running
        try:
            return asyncio.run(coro)
        except RuntimeError:
            raise RuntimeError(
                "Detected nested async. Please use nest_asyncio.apply() to allow nested event loops. "
                "Or, use async entry methods like `aquery()`, `aretriever`, `achat`, etc."
            )
def run_async_tasks(
tasks: List[Coroutine],
show_progress: bool = False,
progress_bar_desc: str = "Running async tasks",
) -> List[Any]:
"""Run a list of async tasks."""
tasks_to_execute: List[Any] = tasks
if show_progress:
try:
import nest_asyncio
from tqdm.asyncio import tqdm
# jupyter notebooks already have an event loop running
# we need to reuse it instead of creating a new one
nest_asyncio.apply()
loop = asyncio.get_event_loop()
async def _tqdm_gather() -> List[Any]:
return await tqdm.gather(*tasks_to_execute, desc=progress_bar_desc)
tqdm_outputs: List[Any] = loop.run_until_complete(_tqdm_gather())
return tqdm_outputs
        # fall back to running without tqdm on a fatal error; this
        # may occur in some environments where tqdm.asyncio
        # is not supported
except Exception:
pass
async def _gather() -> List[Any]:
return await asyncio.gather(*tasks_to_execute)
outputs: List[Any] = asyncio_run(_gather())
return outputs
def chunks(iterable: Iterable, size: int) -> Iterable:
args = [iter(iterable)] * size
return zip_longest(*args, fillvalue=None)
async def batch_gather(
tasks: List[Coroutine], batch_size: int = 10, verbose: bool = False
) -> List[Any]:
output: List[Any] = []
for task_chunk in chunks(tasks, batch_size):
task_chunk = (task for task in task_chunk if task is not None)
output_chunk = await asyncio.gather(*task_chunk)
output.extend(output_chunk)
if verbose:
print(f"Completed {len(output)} out of {len(tasks)} tasks")
return output
def get_asyncio_module(show_progress: bool = False) -> Any:
if show_progress:
from tqdm.asyncio import tqdm_asyncio
module = tqdm_asyncio
else:
module = asyncio
return module
DEFAULT_NUM_WORKERS = 4
T = TypeVar("T")
@dispatcher.span
async def run_jobs(
jobs: List[Coroutine[Any, Any, T]],
show_progress: bool = False,
workers: int = DEFAULT_NUM_WORKERS,
desc: Optional[str] = None,
) -> List[T]:
"""Run jobs.
Args:
jobs (List[Coroutine]):
List of jobs to run.
show_progress (bool):
Whether to show progress bar.
Returns:
List[Any]:
List of results.
"""
semaphore = asyncio.Semaphore(workers)
@dispatcher.span
async def worker(job: Coroutine) -> Any:
async with semaphore:
return await job
pool_jobs = [worker(job) for job in jobs]
if show_progress:
from tqdm.asyncio import tqdm_asyncio
results = await tqdm_asyncio.gather(*pool_jobs, desc=desc)
else:
results = await asyncio.gather(*pool_jobs)
return results
|
# mypy: allow-untyped-defs
from .base_structured_sparsifier import BaseStructuredSparsifier
class SaliencyPruner(BaseStructuredSparsifier):
"""
Prune rows based on the saliency (L1 norm) of each row.
This pruner works on N-Dimensional weight tensors.
    For each row, we calculate the saliency, which is the sum of the L1 norms of all weights in that row.
We expect that the resulting saliency vector has the same shape as our mask.
We then pick elements to remove until we reach the target sparsity_level.
"""
def update_mask(self, module, tensor_name, **kwargs):
        # tensor_name gives you the FQN; all other entries in the sparse config are present in kwargs
weights = getattr(module, tensor_name)
mask = getattr(module.parametrizations, tensor_name)[0].mask
# use negative weights so we can use topk (we prune out the smallest)
if weights.dim() <= 1:
raise Exception( # noqa: TRY002
"Structured pruning can only be applied to a 2+dim weight tensor!"
)
saliency = -weights.norm(dim=tuple(range(1, weights.dim())), p=1)
assert saliency.shape == mask.shape
num_to_pick = int(len(mask) * kwargs["sparsity_level"])
prune = saliency.topk(num_to_pick).indices
# Set the mask to be false for the rows we want to prune
mask.data[prune] = False
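# Hedged sketch of the expected call pattern (the model and config below are
# illustrative, not part of this module): prepare() parametrizes the named
# weights, then step() invokes update_mask with the configured sparsity_level.
#
#   import torch.nn as nn
#   model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 4))
#   pruner = SaliencyPruner({"sparsity_level": 0.5})
#   pruner.prepare(model, [{"tensor_fqn": "0.weight"}])
#   pruner.step()  # rows of 0.weight with the smallest L1 norm are masked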
|
# mypy: allow-untyped-defs
from .base_structured_sparsifier import BaseStructuredSparsifier
class SaliencyPruner(BaseStructuredSparsifier):
"""
Prune rows based on the saliency (L1 norm) of each row.
This pruner works on N-Dimensional weight tensors.
    For each row, we calculate the saliency, which is the sum of the L1 norms of all weights in that row.
We expect that the resulting saliency vector has the same shape as our mask.
We then pick elements to remove until we reach the target sparsity_level.
"""
def update_mask(self, module, tensor_name, **kwargs):
        # tensor_name gives you the FQN; all other entries in the sparse config are present in kwargs
weights = getattr(module, tensor_name)
mask = getattr(module.parametrizations, tensor_name)[0].mask
# use negative weights so we can use topk (we prune out the smallest)
if weights.dim() <= 1:
raise Exception( # noqa: TRY002
"Structured pruning can only be applied to a 2+dim weight tensor!"
)
saliency = -weights.norm(dim=tuple(range(1, weights.dim())), p=1)
assert saliency.shape == mask.shape
num_to_pick = int(len(mask) * kwargs["sparsity_level"])
prune = saliency.topk(num_to_pick).indices
# Set the mask to be false for the rows we want to prune
mask.data[prune] = False
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has an L2
norm equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)
def compute_output_shape(self, input_shape):
# Ensure axis is always treated as a list
if isinstance(self.axis, int):
axes = [self.axis]
else:
axes = self.axis
for axis in axes:
if axis >= len(input_shape) or axis < -len(input_shape):
raise ValueError(
f"Axis {self.axis} is out of bounds for "
f"input shape {input_shape}."
)
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
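The layer's normalization, mirrored in plain NumPy as a sanity check; this sketch assumes the default `axis=-1` and the same `1e-12` epsilon used above.
import numpy as np

data = np.arange(6, dtype="float32").reshape(2, 3)
square_sum = np.sum(data**2, axis=-1, keepdims=True)
normalized = data / np.sqrt(np.maximum(square_sum, 1e-12))
print(np.sum(normalized**2, axis=-1))  # ~[1., 1.]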
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has an L2
norm equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
"""
Opensearch reader over the REST API.
This only uses the basic search API, so it will work with Opensearch.
"""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class OpensearchReader(BaseReader):
"""
Read documents from an Opensearch index.
These documents can then be used in a downstream Llama Index data structure.
Args:
    host (str): Hostname of the cluster (no scheme, no port)
    port (int): Port of the cluster
    index (str): Name of the index (required)
    basic_auth (Optional[set]): Basic authentication (username, password) pair
"""
def __init__(
self, host: str, port: int, index: str, basic_auth: Optional[set] = None
):
"""Initialize with parameters."""
from opensearchpy import OpenSearch
self._opster_client = OpenSearch(
hosts=[{"host": host, "port": port}],
http_compress=True, # enables gzip compression for request bodies
http_auth=basic_auth,
use_ssl=True,
verify_certs=False,
ssl_assert_hostname=False,
ssl_show_warn=False,
)
self._index = index
def load_data(
self,
field: str,
query: Optional[dict] = None,
embedding_field: Optional[str] = None,
) -> List[Document]:
"""
Read data from the Opensearch index.
Args:
field (str): Field in the document to retrieve text from
query (Optional[dict]): Opensearch JSON query DSL object.
For example:
{ "query" : {"match": {"message": {"query": "this is a test"}}}}
embedding_field (Optional[str]): If there are embeddings stored in
this index, this field can be used
to set the embedding field on the returned Document list.
Returns:
List[Document]: A list of documents.
"""
res = self._opster_client.search(body=query, index=self._index)
documents = []
for hit in res["hits"]["hits"]:
value = hit["_source"][field]
_ = hit["_source"].pop(field)
embedding = hit["_source"].get(embedding_field or "", None)
documents.append(
Document(text=value, extra_info=hit["_source"], embedding=embedding)
)
return documents
|
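A hedged usage sketch for OpensearchReader; the host, credentials, and index name are placeholders. Note that opensearch-py accepts a (user, password) tuple for `http_auth`, despite the `set` annotation above.
reader = OpensearchReader(
    host="localhost",  # placeholder cluster host
    port=9200,  # default OpenSearch port
    index="my-index",  # placeholder index name
    basic_auth=("admin", "admin"),  # placeholder credentials
)
docs = reader.load_data(
    field="message",
    query={"query": {"match_all": {}}},
)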
"""Opensearch reader over REST api.
This only uses the basic search api, so it will work Opensearch.
"""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class OpensearchReader(BaseReader):
"""
Read documents from an Opensearch index.
These documents can then be used in a downstream Llama Index data structure.
Args:
    host (str): Hostname of the cluster (no scheme, no port)
    port (int): Port of the cluster
    index (str): Name of the index (required)
    basic_auth (Optional[set]): Basic authentication (username, password) pair
"""
def __init__(
self, host: str, port: int, index: str, basic_auth: Optional[set] = None
):
"""Initialize with parameters."""
from opensearchpy import OpenSearch
self._opster_client = OpenSearch(
hosts=[{"host": host, "port": port}],
http_compress=True, # enables gzip compression for request bodies
http_auth=basic_auth,
use_ssl=True,
verify_certs=False,
ssl_assert_hostname=False,
ssl_show_warn=False,
)
self._index = index
def load_data(
self,
field: str,
query: Optional[dict] = None,
embedding_field: Optional[str] = None,
) -> List[Document]:
"""Read data from the Opensearch index.
Args:
field (str): Field in the document to retrieve text from
query (Optional[dict]): Opensearch JSON query DSL object.
For example:
{ "query" : {"match": {"message": {"query": "this is a test"}}}}
embedding_field (Optional[str]): If there are embeddings stored in
this index, this field can be used
to set the embedding field on the returned Document list.
Returns:
List[Document]: A list of documents.
"""
res = self._opster_client.search(body=query, index=self._index)
documents = []
for hit in res["hits"]["hits"]:
value = hit["_source"][field]
_ = hit["_source"].pop(field)
embedding = hit["_source"].get(embedding_field or "", None)
documents.append(
Document(text=value, extra_info=hit["_source"], embedding=embedding)
)
return documents
|
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
import numpy as np
from sentence_transformers.cross_encoder import CrossEncoder
# Pre-trained cross encoder
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
# We want to compute the similarity between the query sentence
query = "A man is eating pasta."
# With all sentences in the corpus
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# 1. We rank all sentences in the corpus for the query
ranks = model.rank(query, corpus)
# Print the scores
print("Query:", query)
for rank in ranks:
print(f"{rank['score']:.2f}\t{corpus[rank['corpus_id']]}")
# 2. Alternatively, you can also manually compute the score between two sentences
sentence_combinations = [[query, sentence] for sentence in corpus]
scores = model.predict(sentence_combinations)
# Sort the scores in decreasing order to get the corpus indices
ranked_indices = np.argsort(scores)[::-1]
print("scores:", scores)
print("indices:", ranked_indices)
|
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross encoder
model = CrossEncoder('cross-encoder/stsb-distilroberta-base')
# We want to compute the similarity between the query sentence
query = 'A man is eating pasta.'
# With all sentences in the corpus
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'The girl is carrying a baby.',
'A man is riding a horse.',
'A woman is playing violin.',
'Two men pushed carts through the woods.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'A cheetah is running behind its prey.'
]
# So we create the respective sentence combinations
sentence_combinations = [[query, corpus_sentence] for corpus_sentence in corpus]
# Compute the similarity scores for these combinations
similarity_scores = model.predict(sentence_combinations)
# Sort the scores in decreasing order
sim_scores_argsort = reversed(np.argsort(similarity_scores))
# Print the scores
print("Query:", query)
for idx in sim_scores_argsort:
print("{:.2f}\t{}".format(similarity_scores[idx], corpus[idx]))
|
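One detail worth spelling out: `np.argsort` sorts ascending, so both variants above reverse the result to obtain a descending ranking. A tiny check:
import numpy as np

scores = np.array([0.2, 0.9, 0.5])
print(np.argsort(scores)[::-1])  # [1 2 0]: highest score first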
from typing import Any, Dict
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
.. v2betastatus:: UniformTemporalSubsample transform
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
When ``num_samples`` is larger than the size of temporal dimension of the video, it
will sample frames based on nearest neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (is_simple_tensor, datapoints.Video)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints._VideoType, params: Dict[str, Any]) -> datapoints._VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
|
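A sketch of the frame selection this transform performs, assuming the nearest-neighbor index spacing described in the docstring; the random video tensor is a placeholder.
import torch

video = torch.rand(8, 3, 4, 4)  # [T, C, H, W] with T=8 frames
num_samples = 4
indices = torch.linspace(0, video.shape[0] - 1, num_samples).round().long()
clip = video[indices]  # selects frames 0, 2, 5, 7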
from typing import Any, Dict
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
.. betastatus:: UniformTemporalSubsample transform
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
When ``num_samples`` is larger than the size of temporal dimension of the video, it
will sample frames based on nearest neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (is_simple_tensor, datapoints.Video)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints._VideoType, params: Dict[str, Any]) -> datapoints._VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
|
import pytest
from importlib.util import find_spec
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
no_packages = find_spec("psycopg2") is None or find_spec("sqlalchemy") is None or find_spec("asyncpg") is None
def test_class():
names_of_base_classes = [b.__name__ for b in PostgresKVStore.__mro__]
assert BaseKVStore.__name__ in names_of_base_classes
@pytest.mark.skipif(
    no_packages, reason="asyncpg, psycopg2-binary and sqlalchemy not installed"
)
def test_initialization():
errors = []
try:
pgstore1 = PostgresKVStore(table_name="mytable")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore2 = PostgresKVStore(table_name="mytable", connection_string="connection_string")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore3 = PostgresKVStore(table_name="mytable", async_connection_string="async_connection_string")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore4 = PostgresKVStore(table_name="mytable", connection_string="connection_string", async_connection_string="async_connection_string")
errors.append(0)
except ValueError:
errors.append(1)
assert sum(errors) == 3
assert pgstore4._engine is None
assert pgstore4._async_engine is None
|
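The assertions above encode that PostgresKVStore only constructs without raising when both connection strings are supplied; a sketch of the passing form, with placeholder DSNs. The trailing `_engine is None` assertions suggest engines are created lazily rather than at construction time.
store = PostgresKVStore(
    table_name="mytable",
    connection_string="postgresql://user:pass@localhost:5432/db",  # placeholder
    async_connection_string="postgresql+asyncpg://user:pass@localhost:5432/db",  # placeholder
)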
import pytest
from importlib.util import find_spec
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
no_packages = find_spec("psycopg2") is None or find_spec("sqlalchemy") is None or find_spec("asyncpg") is None
def test_class():
names_of_base_classes = [b.__name__ for b in PostgresKVStore.__mro__]
assert BaseKVStore.__name__ in names_of_base_classes
@pytest.mark.skipif(
    no_packages, reason="asyncpg, psycopg2-binary and sqlalchemy not installed"
)
def test_initialization():
errors = []
try:
pgstore1 = PostgresKVStore(table_name="mytable")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore2 = PostgresKVStore(table_name="mytable", connection_string="connection_string")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore3 = PostgresKVStore(table_name="mytable", async_connection_string="async_connection_string")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore4 = PostgresKVStore(table_name="mytable", connection_string="connection_string", async_connection_string="async_connection_string")
errors.append(0)
except ValueError:
errors.append(1)
assert sum(errors) == 3
assert pgstore4._engine is None
assert pgstore4._async_engine is None
|
"""Utils for manipulating images."""
import base64
from io import BytesIO
from typing import cast
from PIL import Image
from PIL.ImageFile import ImageFile
def img_2_b64(image: ImageFile, format: str = "JPEG") -> str:
"""
Convert a PIL.Image to a base64 encoded image string.
Args:
image (ImageFile): The PIL Image object to be converted.
format (str, optional): The image format to save as. Defaults to "JPEG".
Returns:
str: A base64 encoded string representation of the image.
"""
buff = BytesIO()
image.save(buff, format=format)
    return base64.b64encode(buff.getvalue()).decode("utf-8")
def b64_2_img(data: str) -> ImageFile:
"""
Convert base64 encoded image string to a PIL.Image.
Args:
data (str): The base64 encoded image string.
Returns:
ImageFile: A PIL Image object.
"""
buff = BytesIO(base64.b64decode(data))
return cast(ImageFile, Image.open(buff))
|
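A round-trip sketch for the two helpers above; the solid-color test image is illustrative, and any PIL image with a `.save` method works in practice even though the hints name ImageFile.
from PIL import Image

img = Image.new("RGB", (4, 4), color="red")  # tiny in-memory test image
b64 = img_2_b64(img)
restored = b64_2_img(b64)
assert restored.size == (4, 4)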
"""Utils for manipulating images."""
import base64
from io import BytesIO
from typing import cast
from PIL import Image
from PIL.ImageFile import ImageFile
def img_2_b64(image: ImageFile, format: str = "JPEG") -> str:
"""Convert a PIL.Image to a base64 encoded image string.
Args:
image (ImageFile): The PIL Image object to be converted.
format (str, optional): The image format to save as. Defaults to "JPEG".
Returns:
str: A base64 encoded string representation of the image.
"""
buff = BytesIO()
image.save(buff, format=format)
    return base64.b64encode(buff.getvalue()).decode("utf-8")
def b64_2_img(data: str) -> ImageFile:
"""Convert base64 encoded image string to a PIL.Image.
Args:
data (str): The base64 encoded image string.
Returns:
ImageFile: A PIL Image object.
"""
buff = BytesIO(base64.b64decode(data))
return cast(ImageFile, Image.open(buff))
|
"""Trello reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class TrelloReader(BaseReader):
"""
Trello reader. Reads data from Trello boards and cards.
Args:
api_key (str): Trello API key.
api_token (str): Trello API token.
"""
def __init__(self, api_key: str, api_token: str) -> None:
"""Initialize Trello reader."""
self.api_key = api_key
self.api_token = api_token
def load_data(self, board_id: str) -> List[Document]:
"""
Load data from a Trello board.
Args:
board_id (str): Trello board ID.
Returns:
List[Document]: List of documents representing Trello cards.
"""
from trello import TrelloClient
client = TrelloClient(api_key=self.api_key, token=self.api_token)
board = client.get_board(board_id)
cards = board.get_cards()
documents = []
for card in cards:
document = Document(
doc_id=card.name,
text=card.description,
extra_info={
"id": card.id,
"url": card.url,
"due_date": card.due_date,
"labels": [label.name for label in card.labels],
},
)
documents.append(document)
return documents
|
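A hedged usage sketch for TrelloReader; the credentials and board id below are placeholders.
reader = TrelloReader(api_key="TRELLO_API_KEY", api_token="TRELLO_API_TOKEN")
documents = reader.load_data(board_id="BOARD_ID")  # placeholder board id
for doc in documents:
    print(doc.doc_id, doc.extra_info["url"])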
"""Trello reader."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class TrelloReader(BaseReader):
"""Trello reader. Reads data from Trello boards and cards.
Args:
api_key (str): Trello API key.
api_token (str): Trello API token.
"""
def __init__(self, api_key: str, api_token: str) -> None:
"""Initialize Trello reader."""
self.api_key = api_key
self.api_token = api_token
def load_data(self, board_id: str) -> List[Document]:
"""Load data from a Trello board.
Args:
board_id (str): Trello board ID.
Returns:
List[Document]: List of documents representing Trello cards.
"""
from trello import TrelloClient
client = TrelloClient(api_key=self.api_key, token=self.api_token)
board = client.get_board(board_id)
cards = board.get_cards()
documents = []
for card in cards:
document = Document(
doc_id=card.name,
text=card.description,
extra_info={
"id": card.id,
"url": card.url,
"due_date": card.due_date,
"labels": [label.name for label in card.labels],
},
)
documents.append(document)
return documents
|
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2.utils
from common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2.functional import to_image_pil
from torchvision.transforms.v2.utils import has_all, has_any
IMAGE = make_image(color_space="RGB")
BOUNDING_BOX = make_bounding_box(format=datapoints.BoundingBoxFormat.XYXY, spatial_size=IMAGE.spatial_size)
MASK = make_detection_mask(size=IMAGE.spatial_size)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBoxes), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor),
True,
),
(
(to_image_pil(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
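Reading the parametrizations above: `has_any` is true when at least one item in the sample matches at least one of the given types or predicates, while `has_all` requires every given type or predicate to be matched by some item. A compact illustration, reusing the names defined above:
sample = (IMAGE, MASK)
print(has_any(sample, datapoints.BoundingBoxes))  # False: no box in the sample
print(has_all(sample, datapoints.Image, datapoints.Mask))  # True: both present
print(has_all(sample, datapoints.Image, datapoints.BoundingBoxes))  # False: box missing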
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2.utils
from common_utils import make_bounding_box, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2.functional import to_image_pil
from torchvision.transforms.v2.utils import has_all, has_any
IMAGE = make_image(color_space="RGB")
BOUNDING_BOX = make_bounding_box(format=datapoints.BoundingBoxFormat.XYXY, spatial_size=IMAGE.spatial_size)
MASK = make_detection_mask(size=IMAGE.spatial_size)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor),
True,
),
(
(to_image_pil(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2.utils.is_simple_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBox, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBox, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBox, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBox, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
from typing import List, Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
from docarray.utils._internal.pydantic import is_pydantic_v2
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int] = None
text: str
class MyDocNested(MyDoc):
image: ImageDoc
lst: List[str]
return MyDocNested
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2")
@pytest.mark.parametrize('doc_vec', [False, True])
def test_to_from_pandas_df(nested_doc_cls, doc_vec):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
lst=["hello", "world"],
),
nested_doc_cls(
text='hello world', image=ImageDoc(), lst=["hello", "world"]
),
]
)
if doc_vec:
da = da.to_doc_vec()
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
'lst',
]
).all()
if doc_vec:
da_from_df = DocVec[nested_doc_cls].from_dataframe(df)
assert isinstance(da_from_df, DocVec)
else:
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
assert isinstance(da_from_df, DocList)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc] = None
class Middle(BaseDoc):
img: Optional[ImageDoc] = None
inner: Optional[Inner] = None
class Outer(BaseDoc):
img: Optional[ImageDoc] = None
middle: Optional[Middle] = None
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_pandas_without_schema_raise_exception(array_cls):
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
array_cls.from_dataframe(df=df)
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc, array_cls):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
array_cls[nested_doc.__class__].from_dataframe(df=df)
def test_doc_list_error():
class Book(BaseDoc):
title: str
# not testing DocVec bc it already fails here (as it should!)
docs = DocList([Book(title='hello'), Book(title='world')])
with pytest.raises(TypeError):
docs.to_dataframe()
@pytest.mark.proto
def test_union_type_error():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_dataframe(docs.to_dataframe())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_dataframe(docs_basic.to_dataframe())
assert docs_copy == docs_basic
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2")
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_from_to_pandas_tensor_type(tensor_type):
class MyDoc(BaseDoc):
embedding: tensor_type
text: str
image: ImageDoc
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
df_da = da.to_dataframe()
da2 = DocVec[MyDoc].from_dataframe(df_da, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
|
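The column list asserted above shows the flattening convention: nested document fields are joined with double underscores (`image__url`, `image__tensor`, ...). A minimal sketch with a hypothetical schema:
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc

class Note(BaseDoc):  # hypothetical schema for illustration
    text: str
    image: ImageDoc

df = DocList[Note]([Note(text='hi', image=ImageDoc(url='a.png'))]).to_dataframe()
print(list(df.columns))  # ['id', 'text', 'image__id', 'image__url', ...]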
from typing import List, Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
lst: List[str]
return MyDocNested
@pytest.mark.parametrize('doc_vec', [False, True])
def test_to_from_pandas_df(nested_doc_cls, doc_vec):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
lst=["hello", "world"],
),
nested_doc_cls(
text='hello world', image=ImageDoc(), lst=["hello", "world"]
),
]
)
if doc_vec:
da = da.to_doc_vec()
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
'lst',
]
).all()
if doc_vec:
da_from_df = DocVec[nested_doc_cls].from_dataframe(df)
assert isinstance(da_from_df, DocVec)
else:
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
assert isinstance(da_from_df, DocList)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_pandas_without_schema_raise_exception(array_cls):
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
array_cls.from_dataframe(df=df)
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc, array_cls):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
array_cls[nested_doc.__class__].from_dataframe(df=df)
def test_doc_list_error():
class Book(BaseDoc):
title: str
# not testing DocVec bc it already fails here (as it should!)
docs = DocList([Book(title='hello'), Book(title='world')])
with pytest.raises(TypeError):
docs.to_dataframe()
@pytest.mark.proto
def test_union_type_error():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_dataframe(docs.to_dataframe())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_dataframe(docs_basic.to_dataframe())
assert docs_copy == docs_basic
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_from_to_pandas_tensor_type(tensor_type):
class MyDoc(BaseDoc):
embedding: tensor_type
text: str
image: ImageDoc
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
df_da = da.to_dataframe()
da2 = DocVec[MyDoc].from_dataframe(df_da, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.xception import Xception as Xception
from keras.src.applications.xception import (
decode_predictions as decode_predictions,
)
from keras.src.applications.xception import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.xception import Xception
from keras.src.applications.xception import decode_predictions
from keras.src.applications.xception import preprocess_input
|
import os
from pathlib import Path
from jina.constants import __cache_path__
def generate_default_volume_and_workspace(workspace_id=''):
"""automatically generate a docker volume, and an Executor workspace inside it
:param workspace_id: id that will be part of the fallback workspace path. Default is not adding such an id
:return: List of volumes and a workspace string
"""
default_workspace = __cache_path__
container_addr = '/app'
if default_workspace: # use default workspace provided in env var
host_addr = default_workspace
workspace = os.path.relpath(
path=os.path.abspath(default_workspace), start=Path.home()
)
else: # fallback if no custom volume and no default workspace
workspace = os.path.join(__cache_path__, 'executor-workspace')
host_addr = os.path.join(
Path.home(),
workspace,
workspace_id,
)
workspace_in_container = os.path.join(container_addr, workspace)
generated_volumes = [os.path.abspath(host_addr) + f':{container_addr}']
return generated_volumes, workspace_in_container
|
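What the helper returns, sketched with hypothetical values; the exact host path depends on `__cache_path__` and the current home directory.
volumes, workdir = generate_default_volume_and_workspace(workspace_id='abc')
# volumes -> ['<absolute host path>:/app']
# workdir -> '/app/<workspace path relative to the home directory>'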
import os
from pathlib import Path
from jina import __cache_path__
def generate_default_volume_and_workspace(workspace_id=''):
"""automatically generate a docker volume, and an Executor workspace inside it
:param workspace_id: id that will be part of the fallback workspace path. Default is not adding such an id
:return: List of volumes and a workspace string
"""
default_workspace = __cache_path__
container_addr = '/app'
if default_workspace: # use default workspace provided in env var
host_addr = default_workspace
workspace = os.path.relpath(
path=os.path.abspath(default_workspace), start=Path.home()
)
else: # fallback if no custom volume and no default workspace
workspace = os.path.join(__cache_path__, 'executor-workspace')
host_addr = os.path.join(
Path.home(),
workspace,
workspace_id,
)
workspace_in_container = os.path.join(container_addr, workspace)
generated_volumes = [os.path.abspath(host_addr) + f':{container_addr}']
return generated_volumes, workspace_in_container
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Union
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseMetric(metaclass=ABCMeta):
"""Base class for a metric.
The metric first processes each batch of data_samples and predictions,
and appends the processed results to the results list. Then it
collects all results together from all ranks if distributed training
is used. Finally, it computes the metrics of the entire dataset.
A subclass of class:`BaseMetric` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in metric class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[dict],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
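A minimal sketch of a concrete BaseMetric subclass; the `pred_label`/`gt_label` keys on the prediction dicts are illustrative assumptions, not part of the base class contract.
class Accuracy(BaseMetric):
    default_prefix = 'acc'  # final metric key becomes 'acc/accuracy'
    def process(self, data_batch, predictions):
        for pred in predictions:
            # Append one processed result per sample; keys are hypothetical.
            self.results.append(int(pred['pred_label'] == pred['gt_label']))
    def compute_metrics(self, results):
        # Assumes at least one sample was processed.
        return dict(accuracy=sum(results) / len(results))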
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseMetric(metaclass=ABCMeta):
"""Base class for a metric.
The metric first processes each batch of data_samples and predictions,
and appends the processed results to the results list. Then it
collects all results together from all ranks if distributed training
is used. Finally, it computes the metrics of the entire dataset.
A subclass of class:`BaseMetric` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in metric class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[Tuple[Any, dict]],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .build_functions import (build_model_from_cfg, build_runner_from_cfg,
build_scheduler_from_cfg)
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', build_func=build_scheduler_from_cfg)
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
|
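The registration/build round trip these registries support, sketched with a hypothetical registry and class; `Registry` and the `build(dict(type=...))` pattern are used the same way as in the module above.
from mmengine.registry import Registry

TOOLS = Registry('tool')  # hypothetical registry for illustration

@TOOLS.register_module()
class Hammer:
    def __init__(self, size: int = 1):
        self.size = size

hammer = TOOLS.build(dict(type='Hammer', size=3))
assert hammer.size == 3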
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from mmengine.registry import build_model_from_cfg, build_runner_from_cfg
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
# the student use the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
return losses
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self, batch_inputs: Tensor,
batch_data_samples: SampleList, **kwargs) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
# the student use the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.forward_train(x, label_assignment_results,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore)
return losses
|
from __future__ import annotations
from copy import deepcopy
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture(scope="session")
def _splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def splade_bert_tiny_model(_splade_bert_tiny_model: SparseEncoder) -> SparseEncoder:
return deepcopy(_splade_bert_tiny_model)
@pytest.fixture()
def inference_free_splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def inference_free_splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def csr_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def inference_free_splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def inference_free_splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def csr_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|