input | output |
---|---|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class Dropout(Module):
"""Dropout layer.
Args:
        dropout: Dropout probability applied to the ``sentence_embedding``.
"""
config_keys: list[str] = ["dropout"]
def __init__(self, dropout: float = 0.2):
super().__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
|
from __future__ import annotations
import json
import os
from torch import Tensor, nn
class Dropout(nn.Module):
"""Dropout layer.
Args:
        dropout: Dropout probability applied to the ``sentence_embedding``.
"""
def __init__(self, dropout: float = 0.2):
super().__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
|
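Both versions above implement the same forward contract; only the save/load plumbing changed. A minimal sketch of that contract, assuming only `torch` is installed (the batch and embedding sizes are made up for illustration):

import torch
from torch import nn

# Stand-in for the module above: dropout is applied to the pooled
# "sentence_embedding" entry of the features dict and written back in place.
dropout_layer = nn.Dropout(p=0.2)
features = {"sentence_embedding": torch.randn(4, 768)}
features.update({"sentence_embedding": dropout_layer(features["sentence_embedding"])})
assert features["sentence_embedding"].shape == (4, 768)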
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["marigold_image_processing"] = ["MarigoldImageProcessor"]
_import_structure["pipeline_marigold_depth"] = ["MarigoldDepthOutput", "MarigoldDepthPipeline"]
_import_structure["pipeline_marigold_intrinsics"] = ["MarigoldIntrinsicsOutput", "MarigoldIntrinsicsPipeline"]
_import_structure["pipeline_marigold_normals"] = ["MarigoldNormalsOutput", "MarigoldNormalsPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .marigold_image_processing import MarigoldImageProcessor
from .pipeline_marigold_depth import MarigoldDepthOutput, MarigoldDepthPipeline
from .pipeline_marigold_intrinsics import MarigoldIntrinsicsOutput, MarigoldIntrinsicsPipeline
from .pipeline_marigold_normals import MarigoldNormalsOutput, MarigoldNormalsPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["marigold_image_processing"] = ["MarigoldImageProcessor"]
_import_structure["pipeline_marigold_depth"] = ["MarigoldDepthOutput", "MarigoldDepthPipeline"]
_import_structure["pipeline_marigold_normals"] = ["MarigoldNormalsOutput", "MarigoldNormalsPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .marigold_image_processing import MarigoldImageProcessor
from .pipeline_marigold_depth import MarigoldDepthOutput, MarigoldDepthPipeline
from .pipeline_marigold_normals import MarigoldNormalsOutput, MarigoldNormalsPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
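Both `__init__.py` variants rely on diffusers' `_LazyModule` to defer the heavy pipeline imports until an attribute is first accessed. A rough sketch of the underlying idea using PEP 562 module-level `__getattr__`; `my_pipelines`, `pipeline_depth`, and `DepthPipeline` are hypothetical names, and this is not diffusers' actual implementation:

# hypothetical my_pipelines/__init__.py
import importlib

_import_structure = {"pipeline_depth": ["DepthPipeline"]}

def __getattr__(name):
    # Import the submodule lazily, only when one of its symbols is requested.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")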
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
        :param traversal_paths: default traversal path
        :param batch_size: default batch size for encoding
        :param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with a `blob` of size
            (n,) or (2, n) holding audio time-series data. Additionally, the `tags` of
            each `Document` must contain a `sample_rate` field with the sample rate of
            the audio data. The `sample_rate` must be a positive scalar value.
        :param parameters: dictionary that defines the `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.batch(batch_size, traversal_paths):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
        if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            # already at the target rate; return the blob unchanged so the
            # caller's two-value unpacking does not fail
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
        :param traversal_paths: default traversal path
        :param batch_size: default batch size for encoding
        :param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with a `blob` of size
            (n,) or (2, n) holding audio time-series data. Additionally, the `tags` of
            each `Document` must contain a `sample_rate` field with the sample rate of
            the audio data. The `sample_rate` must be a positive scalar value.
        :param parameters: dictionary that defines the `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.no_grad():
for batch in docs.batch(batch_size, traversal_paths):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
        if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            # already at the target rate; return the blob unchanged so the
            # caller's two-value unpacking does not fail
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
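The only difference between the two encoder versions above is `torch.inference_mode()` versus `torch.no_grad()`. Note also that `lr.resample` is called positionally, which matches librosa < 0.10; a sketch of the equivalent call under the keyword-only signature enforced since librosa 0.10 (the 16 kHz input is made up for illustration):

import numpy as np
import librosa as lr

blob = np.random.randn(16000).astype('float32')  # 1 second of mono audio at 16 kHz
# librosa >= 0.10 makes the sample-rate arguments keyword-only.
resampled = lr.resample(blob, orig_sr=16000, target_sr=44100)
assert resampled.shape == (44100,)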
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
yield "results", []
|
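The two block versions differ only in the extra `yield "results", []` on the error path. For reference, a standalone sketch of the same request using the plain `requests` library instead of the project's `backend.util.request` wrapper; the API key and document ID are placeholders:

import requests

response = requests.post(
    "https://api.exa.ai/contents",
    headers={"Content-Type": "application/json", "x-api-key": "<YOUR_EXA_API_KEY>"},
    json={
        "ids": ["<document-id-from-a-previous-search>"],
        "text": {"maxCharacters": 1000, "includeHtmlTags": False},
        "highlights": {"numSentences": 3, "highlightsPerUrl": 3, "query": ""},
        "summary": {"query": ""},
    },
)
response.raise_for_status()
print(response.json().get("results", []))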
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import ExecutorFailToLoad
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
self,
path_vectorizer: Optional[str] = None,
batch_size: int = 2048,
traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
"""
:param path_vectorizer: path of the pre-trained tfidf sklearn vectorizer
        :param traversal_paths: fallback traversal path in case no traversal path is sent in the request
        :param batch_size: fallback batch size in case no batch size is sent in the request
"""
super().__init__(*args, **kwargs)
if path_vectorizer is None:
path_vectorizer = str(
Path(__file__).parent / 'model/tfidf_vectorizer.pickle'
)
self.path_vectorizer = path_vectorizer
self.batch_size = batch_size
self.traversal_paths = traversal_paths
if os.path.exists(self.path_vectorizer):
            with open(self.path_vectorizer, 'rb') as f:
                self.tfidf_vectorizer = pickle.load(f)
else:
raise ExecutorFailToLoad(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Generate the TF-IDF feature vector for all text documents.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
"""
if docs is None:
return
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=parameters.get('batch_size', self.batch_size),
)
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
|
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
self,
path_vectorizer: Optional[str] = None,
batch_size: int = 2048,
traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
"""
:param path_vectorizer: path of the pre-trained tfidf sklearn vectorizer
        :param traversal_paths: fallback traversal path in case no traversal path is sent in the request
        :param batch_size: fallback batch size in case no batch size is sent in the request
"""
super().__init__(*args, **kwargs)
if path_vectorizer is None:
path_vectorizer = str(
Path(__file__).parent / 'model/tfidf_vectorizer.pickle'
)
self.path_vectorizer = path_vectorizer
self.batch_size = batch_size
self.traversal_paths = traversal_paths
if os.path.exists(self.path_vectorizer):
            with open(self.path_vectorizer, 'rb') as f:
                self.tfidf_vectorizer = pickle.load(f)
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Generate the TF-IDF feature vector for all text documents.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
"""
if docs is None:
return
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
            filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=parameters.get('batch_size', self.batch_size),
)
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
|
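Both versions require a fitted scikit-learn `TfidfVectorizer` pickled at the expected path; only the raised exception class differs. A sketch of producing such a pickle, assuming scikit-learn is installed and using a toy corpus:

import pickle
from sklearn.feature_extraction.text import TfidfVectorizer

corpus = ["hello world", "tf-idf turns text into sparse vectors", "sparse text embeddings"]
vectorizer = TfidfVectorizer().fit(corpus)
# Save where TFIDFTextEncoder looks by default: <module dir>/model/tfidf_vectorizer.pickle
with open('tfidf_vectorizer.pickle', 'wb') as f:
    pickle.dump(vectorizer, f)
# At encoding time, each doc.text becomes one sparse row of vectorizer.transform([...]).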
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
from . import tqdm as _tqdm # _tqdm is the module
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from .tqdm import (
disable_progress_bars,
enable_progress_bars,
are_progress_bars_disabled,
tqdm,
)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
"""Util import."""
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
|
"""Internal representation of a structured query language."""
from langchain_core.structured_query import (
Comparator,
Comparison,
Expr,
FilterDirective,
Operation,
Operator,
StructuredQuery,
Visitor,
)
__all__ = [
"Comparator",
"Comparison",
"Expr",
"FilterDirective",
"Operation",
"Operator",
"StructuredQuery",
"Visitor",
]
|
"""Internal representation of a structured query language."""
from langchain_core.structured_query import (
Comparator,
Comparison,
Expr,
FilterDirective,
Operation,
Operator,
StructuredQuery,
Visitor,
)
__all__ = [
"Visitor",
"Expr",
"Operator",
"Comparator",
"FilterDirective",
"Comparison",
"Operation",
"StructuredQuery",
]
|
import os
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as
from docarray import BaseDoc
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
VideoNdArray,
VideoTorchTensor,
)
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.video import VideoTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_video_tensor,cls_tensor',
[
(torch.zeros(1, 224, 224, 3), VideoTorchTensor, torch.Tensor),
(np.zeros((1, 224, 224, 3)), VideoNdArray, np.ndarray),
],
)
def test_set_video_tensor(tensor, cls_video_tensor, cls_tensor):
class MyVideoDoc(BaseDoc):
tensor: cls_video_tensor
doc = MyVideoDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_video_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_video_tensor_tensorflow():
class MyVideoDoc(BaseDoc):
tensor: VideoTensorFlowTensor
doc = MyVideoDoc(tensor=tf.zeros((1, 224, 224, 3)))
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1, 224, 224, 3)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, np.zeros((1, 224, 224, 3))),
(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, np.zeros((1, 224, 224, 3))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(VideoTensorFlowTensor, np.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, torch.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, torch.zeros(224, 3)),
(VideoTorchTensor, torch.zeros(1, 224, 224, 100)),
(VideoNdArray, 'hello'),
(VideoTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(
VideoTorchTensor,
torch.zeros(1, 224, 224, 3),
VideoTorchTensor._proto_type_name,
),
(VideoNdArray, np.zeros((1, 224, 224, 3)), VideoNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
proto = tensor._to_node_protobuf()
assert VideoTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_file(video_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_bytes(video_tensor, tmpdir):
b = video_tensor.to_bytes()
    assert isinstance(b, bytes)
@pytest.mark.tensorflow
def test_save_video_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.randn(100, 1, 1024).to(torch.float32)),
parse_obj_as(AudioNdArray, np.random.randn(100, 1, 1024).astype('float32')),
],
)
def test_save_video_tensor_to_file_including_audio(video_tensor, audio_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file, audio_tensor=audio_tensor)
assert os.path.isfile(tmp_file)
|
import os
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as
from docarray import BaseDocument
from docarray.typing import (
AudioNdArray,
AudioTorchTensor,
VideoNdArray,
VideoTorchTensor,
)
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.video import VideoTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_video_tensor,cls_tensor',
[
(torch.zeros(1, 224, 224, 3), VideoTorchTensor, torch.Tensor),
(np.zeros((1, 224, 224, 3)), VideoNdArray, np.ndarray),
],
)
def test_set_video_tensor(tensor, cls_video_tensor, cls_tensor):
class MyVideoDoc(BaseDocument):
tensor: cls_video_tensor
doc = MyVideoDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_video_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_video_tensor_tensorflow():
class MyVideoDoc(BaseDocument):
tensor: VideoTensorFlowTensor
doc = MyVideoDoc(tensor=tf.zeros((1, 224, 224, 3)))
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1, 224, 224, 3)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, np.zeros((1, 224, 224, 3))),
(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, np.zeros((1, 224, 224, 3))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(VideoTensorFlowTensor, np.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
arr = parse_obj_as(VideoTensorFlowTensor, torch.zeros((1, 224, 224, 3)))
assert isinstance(arr, VideoTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(VideoNdArray, torch.zeros(1, 224, 224, 3)),
(VideoTorchTensor, torch.zeros(224, 3)),
(VideoTorchTensor, torch.zeros(1, 224, 224, 100)),
(VideoNdArray, 'hello'),
(VideoTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(
VideoTorchTensor,
torch.zeros(1, 224, 224, 3),
VideoTorchTensor._proto_type_name,
),
(VideoNdArray, np.zeros((1, 224, 224, 3)), VideoNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
proto = tensor._to_node_protobuf()
assert VideoTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_file(video_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
def test_save_video_tensor_to_bytes(video_tensor, tmpdir):
b = video_tensor.to_bytes()
    assert isinstance(b, bytes)
@pytest.mark.tensorflow
def test_save_video_tensorflow_tensor_to_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor = parse_obj_as(VideoTensorFlowTensor, tf.zeros((1, 224, 224, 3)))
video_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.parametrize(
'video_tensor',
[
parse_obj_as(VideoTorchTensor, torch.zeros(1, 224, 224, 3)),
parse_obj_as(VideoNdArray, np.zeros((1, 224, 224, 3))),
],
)
@pytest.mark.parametrize(
'audio_tensor',
[
parse_obj_as(AudioTorchTensor, torch.randn(100, 1, 1024).to(torch.float32)),
parse_obj_as(AudioNdArray, np.random.randn(100, 1, 1024).astype('float32')),
],
)
def test_save_video_tensor_to_file_including_audio(video_tensor, audio_tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.mp4')
video_tensor.save(tmp_file, audio_tensor=audio_tensor)
assert os.path.isfile(tmp_file)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
class EvalHook(BaseEvalHook):
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self._should_evaluate(runner):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
class DistEvalHook(BaseDistEvalHook):
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
# Synchronization of BatchNorm's buffer (running_mean
# and running_var) is not supported in the DDP of pytorch,
# which may cause the inconsistent performance of models in
# different ranks, so we broadcast BatchNorm's buffers
# of rank 0 to other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self._should_evaluate(runner):
return
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
from mmdet.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
|
import os.path as osp
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm
class EvalHook(BaseEvalHook):
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
if not self._should_evaluate(runner):
return
from mmdet.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
class DistEvalHook(BaseDistEvalHook):
def _do_evaluate(self, runner):
"""perform evaluation and save ckpt."""
# Synchronization of BatchNorm's buffer (running_mean
# and running_var) is not supported in the DDP of pytorch,
# which may cause the inconsistent performance of models in
# different ranks, so we broadcast BatchNorm's buffers
# of rank 0 to other ranks to avoid this.
if self.broadcast_bn_buffer:
model = runner.model
for name, module in model.named_modules():
if isinstance(module,
_BatchNorm) and module.track_running_stats:
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self._should_evaluate(runner):
return
tmpdir = self.tmpdir
if tmpdir is None:
tmpdir = osp.join(runner.work_dir, '.eval_hook')
from mmdet.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=tmpdir,
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
key_score = self.evaluate(runner, results)
if self.save_best:
self._save_ckpt(runner, key_score)
|
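The BN-buffer broadcast in `DistEvalHook` above keeps evaluation consistent across ranks, since DDP does not synchronize BatchNorm buffers on its own. A minimal sketch of that step in isolation, assuming an initialized process group and a model containing BatchNorm layers:

import torch.distributed as dist
from torch.nn.modules.batchnorm import _BatchNorm

def broadcast_bn_buffers(model):
    # Copy rank 0's running statistics to every other rank, mirroring the
    # broadcast_bn_buffer branch of DistEvalHook above.
    for _, module in model.named_modules():
        if isinstance(module, _BatchNorm) and module.track_running_stats:
            dist.broadcast(module.running_var, 0)
            dist.broadcast(module.running_mean, 0)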
_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './mask_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
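The pair above expresses the same 24-epoch ("2x") schedule in two MMDetection config dialects: the 3.x `train_cfg`/`param_scheduler` form and the 2.x `lr_config`/`runner` form. A rough field-by-field mapping, stated as comments since both sides are plain config dicts:

# Approximate mapping between the two config dialects above (MMDet 2.x -> 3.x):
# runner = dict(type='EpochBasedRunner', max_epochs=24) -> train_cfg = dict(max_epochs=24)
# lr_config = dict(step=[16, 22]) -> dict(type='MultiStepLR', milestones=[16, 22], gamma=0.1)
# the warmup implied by 2.x defaults -> an explicit LinearLR over the first 500 iterations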
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> datapoints.Image:
"""[BETA] See :class:`~torchvision.transforms.v2.ToImage` for details."""
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
return datapoints.Image(output)
to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
|
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> datapoints.Image:
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
return datapoints.Image(output)
to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
|
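A short usage sketch of the `to_image` conversion defined above, assuming the beta torchvision transforms-v2 API that both versions import from (torchvision ~0.16; the array contents are made up):

import numpy as np
from torchvision.transforms.v2 import functional as F  # beta v2 API, as above

frame = np.zeros((224, 224, 3), dtype=np.uint8)  # HWC numpy image
image = F.to_image(frame)                        # numpy HWC -> datapoints.Image, CHW
print(image.shape)  # torch.Size([3, 224, 224])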
from typing import List
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Image
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
# Obtained with:
# ```
# import PIL.Image
# IMAGE_EXTENSIONS = []
# PIL.Image.init()
# for ext, format in PIL.Image.EXTENSION.items():
# if format in PIL.Image.OPEN:
# IMAGE_EXTENSIONS.append(ext[1:])
# ```
# We intentionally do not run this code on launch because:
# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
IMAGE_EXTENSIONS = [
".blp",
".bmp",
".dib",
".bufr",
".cur",
".pcx",
".dcx",
".dds",
".ps",
".eps",
".fit",
".fits",
".fli",
".flc",
".ftc",
".ftu",
".gbr",
".gif",
".grib",
# ".h5", # may contain zero or several images
# ".hdf", # may contain zero or several images
".png",
".apng",
".jp2",
".j2k",
".jpc",
".jpf",
".jpx",
".j2c",
".icns",
".ico",
".im",
".iim",
".tif",
".tiff",
".jfif",
".jpe",
".jpg",
".jpeg",
".mpg",
".mpeg",
".msp",
".pcd",
".pxr",
".pbm",
".pgm",
".ppm",
".pnm",
".psd",
".bw",
".rgb",
".rgba",
".sgi",
".ras",
".tga",
".icb",
".vda",
".vst",
".webp",
".wmf",
".emf",
".xbm",
".xpm",
]
ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
|
from typing import List
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""BuilderConfig for ImageFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class ImageFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Image
BASE_COLUMN_NAME = "image"
BUILDER_CONFIG_CLASS = ImageFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
# Obtained with:
# ```
# import PIL.Image
# IMAGE_EXTENSIONS = []
# PIL.Image.init()
# for ext, format in PIL.Image.EXTENSION.items():
# if format in PIL.Image.OPEN:
# IMAGE_EXTENSIONS.append(ext[1:])
# ```
# We intentionally do not run this code on launch because:
# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
IMAGE_EXTENSIONS = [
".blp",
".bmp",
".dib",
".bufr",
".cur",
".pcx",
".dcx",
".dds",
".ps",
".eps",
".fit",
".fits",
".fli",
".flc",
".ftc",
".ftu",
".gbr",
".gif",
".grib",
".h5",
".hdf",
".png",
".apng",
".jp2",
".j2k",
".jpc",
".jpf",
".jpx",
".j2c",
".icns",
".ico",
".im",
".iim",
".tif",
".tiff",
".jfif",
".jpe",
".jpg",
".jpeg",
".mpg",
".mpeg",
".msp",
".pcd",
".pxr",
".pbm",
".pgm",
".ppm",
".pnm",
".psd",
".bw",
".rgb",
".rgba",
".sgi",
".ras",
".tga",
".icb",
".vda",
".vst",
".webp",
".wmf",
".emf",
".xbm",
".xpm",
]
ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
|
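A typical way to exercise this builder, assuming the `datasets` library and a hypothetical local directory laid out as `data/train/<label>/<file>.jpg`:

from datasets import load_dataset

# "imagefolder" resolves to the ImageFolder builder above; labels are inferred
# from the directory names unless drop_labels is set.
ds = load_dataset("imagefolder", data_dir="data", split="train")
print(ds[0]["image"], ds[0]["label"])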
import pytest
from langchain.evaluation.string_distance import (
PairwiseStringDistanceEvalChain,
StringDistance,
StringDistanceEvalChain,
)
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
def test_zero_distance(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = eval_chain.evaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
@pytest.mark.parametrize("normalize_score", [True, False])
def test_zero_distance_pairwise(
distance: StringDistance, normalize_score: bool
) -> None:
eval_chain = PairwiseStringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
string = "三人行则必有我师"
result = eval_chain.evaluate_string_pairs(prediction=string, prediction_b=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_string_pairs(
prediction=string, prediction_b=string
)
assert "score" in result
assert result["score"] == 0
valid_distances = [
distance for distance in StringDistance if distance != StringDistance.HAMMING
]
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
@pytest.mark.parametrize("normalize_score", [True, False])
def test_non_zero_distance(distance: StringDistance, normalize_score: bool) -> None:
eval_chain = StringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_strings(prediction=prediction, reference=reference)
assert "score" in result
assert result["score"] > 0
if normalize_score:
assert result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_strings(
prediction=prediction, reference=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
def test_non_zero_distance_pairwise(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
|
import pytest
from langchain.evaluation.string_distance import (
PairwiseStringDistanceEvalChain,
StringDistance,
StringDistanceEvalChain,
)
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
def test_zero_distance(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = eval_chain.evaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
@pytest.mark.parametrize("normalize_score", [True, False])
def test_zero_distance_pairwise(
distance: StringDistance, normalize_score: bool
) -> None:
eval_chain = PairwiseStringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
string = "三人行则必有我师"
result = eval_chain.evaluate_string_pairs(prediction=string, prediction_b=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_string_pairs(
prediction=string, prediction_b=string
)
assert "score" in result
assert result["score"] == 0
valid_distances = [
distance for distance in StringDistance if distance != StringDistance.HAMMING
]
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
@pytest.mark.parametrize("normalize_score", [True, False])
def test_non_zero_distance(distance: StringDistance, normalize_score: bool) -> None:
eval_chain = StringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_strings(prediction=prediction, reference=reference)
assert "score" in result
assert 0 < result["score"]
if normalize_score:
assert result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_strings(
prediction=prediction, reference=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
def test_non_zero_distance_pairwise(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
|
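Both test modules delegate the actual string metrics to rapidfuzz. A sketch of the raw distances behind the zero / non-zero expectations above, assuming rapidfuzz is installed:

from rapidfuzz import distance

prediction, reference = "I like to eat apples.", "I like apples."
print(distance.Levenshtein.distance(prediction, reference))             # 7 edits
print(distance.Levenshtein.normalized_distance(prediction, reference))  # ~0.33, in [0, 1]
print(distance.Levenshtein.distance(reference, reference))              # 0 -> "zero distance"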
"""Standard LangChain interface tests."""
from pathlib import Path
from typing import Literal, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessageChunk
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_anthropic import ChatAnthropic
REPO_ROOT_DIR = Path(__file__).parents[5]
class TestAnthropicStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-5-sonnet-latest"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_pdf_inputs(self) -> bool:
return True
@property
def supports_image_tool_message(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def enable_vcr_tests(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {
"invoke": ["cache_read_input", "cache_creation_input"],
"stream": ["cache_read_input", "cache_creation_input"],
}
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
},
],
},
],
stream,
)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
# invoke twice so first invocation is cached
_invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
},
],
},
],
stream,
)
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
},
],
},
],
stream,
)
def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage: # noqa: FBT001
if stream:
full = None
for chunk in llm.stream(input_):
full = cast(BaseMessageChunk, chunk) if full is None else full + chunk
return cast(AIMessage, full)
return cast(AIMessage, llm.invoke(input_))
|
"""Standard LangChain interface tests"""
from pathlib import Path
from typing import Literal, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessageChunk
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_anthropic import ChatAnthropic
REPO_ROOT_DIR = Path(__file__).parents[5]
class TestAnthropicStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-5-sonnet-latest"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_pdf_inputs(self) -> bool:
return True
@property
def supports_image_tool_message(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def enable_vcr_tests(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {
"invoke": ["cache_read_input", "cache_creation_input"],
"stream": ["cache_read_input", "cache_creation_input"],
}
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
# invoke twice so first invocation is cached
_invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
if full is None:
full = cast(BaseMessageChunk, chunk)
else:
full = full + chunk
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel,
is_model_wrapper, revert_sync_batchnorm)
from mmengine.registry import MODEL_WRAPPERS, Registry
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.SyncBatchNorm(8))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
def test_is_model_wrapper():
# Test basic module wrapper.
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29510'
os.environ['RANK'] = str(0)
init_process_group(backend='gloo', rank=0, world_size=1)
model = nn.Linear(1, 1)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` can check model wrapper registered in custom
# registry.
CHILD_REGISTRY = Registry('test_is_model_wrapper', parent=MODEL_WRAPPERS)
class CustomModelWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
CHILD_REGISTRY.register_module(module=CustomModelWrapper)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel, CustomModelWrapper
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` will not check model wrapper in parent
# registry from a child registry.
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert not is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
wrapper_model = CustomModelWrapper(model)
assert is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
destroy_process_group()
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn as nn
from mmengine.model import revert_sync_batchnorm
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.SyncBatchNorm(8))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
|
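`revert_sync_batchnorm` exists because `SyncBatchNorm` raises on CPU without a distributed process group, which is exactly what the test above asserts. A simplified sketch of the idea, not mmengine's actual implementation:

import torch.nn as nn

def revert_sync_bn_sketch(module: nn.Module) -> nn.Module:
    # Replace every SyncBatchNorm with a plain BatchNorm2d carrying over the
    # learned parameters and running statistics, so the model runs on CPU.
    if isinstance(module, nn.SyncBatchNorm):
        bn = nn.BatchNorm2d(module.num_features, module.eps, module.momentum,
                            module.affine, module.track_running_stats)
        if module.affine:
            bn.weight, bn.bias = module.weight, module.bias
        bn.running_mean, bn.running_var = module.running_mean, module.running_var
        return bn
    for name, child in module.named_children():
        setattr(module, name, revert_sync_bn_sketch(child))
    return module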
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
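# Editor's sketch (illustrative only, not part of the original file): this is
# roughly how `forward_dummy` is driven by the FLOPs tool referenced in its
# docstring, assuming `model` is a DETR instance already built from a config.
#
#     dummy_input = torch.randn(1, 3, 800, 1333)
#     outs = model.forward_dummy(dummy_input)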
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
|
"""Document summary index."""
from llama_index.core.indices.document_summary.base import (
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
)
from llama_index.core.indices.document_summary.retrievers import (
DocumentSummaryIndexEmbeddingRetriever,
DocumentSummaryIndexLLMRetriever,
DocumentSummaryIndexRetriever,
)
__all__ = [
"DocumentSummaryIndex",
"DocumentSummaryIndexLLMRetriever",
"DocumentSummaryIndexEmbeddingRetriever",
# legacy
"GPTDocumentSummaryIndex",
"DocumentSummaryIndexRetriever",
]
|
"""Document summary index."""
from llama_index.core.indices.document_summary.base import (
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
)
from llama_index.core.indices.document_summary.retrievers import (
DocumentSummaryIndexEmbeddingRetriever,
DocumentSummaryIndexLLMRetriever,
DocumentSummaryIndexRetriever,
)
__all__ = [
"DocumentSummaryIndex",
"DocumentSummaryIndexLLMRetriever",
"DocumentSummaryIndexEmbeddingRetriever",
# legacy
"GPTDocumentSummaryIndex",
"DocumentSummaryIndexRetriever",
]
|
import numpy as np
from keras.src import testing
from keras.src.datasets import imdb
class ImdbLoadDataTest(testing.TestCase):
def test_load_data_default(self):
(x_train, y_train), (x_test, y_test) = imdb.load_data()
self.assertIsInstance(x_train, np.ndarray)
self.assertIsInstance(y_train, np.ndarray)
self.assertIsInstance(x_test, np.ndarray)
self.assertIsInstance(y_test, np.ndarray)
# Check lengths
self.assertEqual(len(x_train), 25000)
self.assertEqual(len(y_train), 25000)
self.assertEqual(len(x_test), 25000)
self.assertEqual(len(y_test), 25000)
# Check types within lists for x
self.assertIsInstance(x_train[0], list)
self.assertIsInstance(x_test[0], list)
def test_num_words(self):
# Only consider the top 1000 words
(x_train, _), _ = imdb.load_data(num_words=1000)
# Ensure that no word index exceeds 999 (0-based indexing)
max_index = max(max(sequence) for sequence in x_train if sequence)
self.assertLessEqual(max_index, 999)
def test_skip_top(self):
# Skip the top 10 most frequent words
(x_train, _), _ = imdb.load_data(skip_top=10, num_words=1000)
# Check if top 10 words are skipped properly
self.assertNotIn(1, x_train[0]) # Assuming 1 is among top 10
def test_maxlen(self):
# Only consider sequences shorter than 100
(x_train, _), _ = imdb.load_data(maxlen=100)
self.assertTrue(all(len(seq) <= 100 for seq in x_train))
def test_get_word_index(self):
word_index = imdb.get_word_index()
self.assertIsInstance(word_index, dict)
# Check if word_index contains specific known words
self.assertIn("the", word_index)
self.assertIn("and", word_index)
|
import numpy as np
from keras.src import testing
from keras.src.datasets import imdb
class ImdbLoadDataTest(testing.TestCase):
def test_load_data_default(self):
(x_train, y_train), (x_test, y_test) = imdb.load_data()
self.assertIsInstance(x_train, list)
self.assertIsInstance(y_train, np.ndarray)
self.assertIsInstance(x_test, list)
self.assertIsInstance(y_test, np.ndarray)
# Check lengths
self.assertEqual(len(x_train), 25000)
self.assertEqual(len(y_train), 25000)
self.assertEqual(len(x_test), 25000)
self.assertEqual(len(y_test), 25000)
# Check types within lists for x
self.assertIsInstance(x_train[0], list)
self.assertIsInstance(x_test[0], list)
def test_num_words(self):
# Only consider the top 1000 words
(x_train, _), _ = imdb.load_data(num_words=1000)
# Ensure that no word index exceeds 999 (0-based indexing)
max_index = max(max(sequence) for sequence in x_train if sequence)
self.assertLessEqual(max_index, 999)
def test_skip_top(self):
# Skip the top 10 most frequent words
(x_train, _), _ = imdb.load_data(skip_top=10, num_words=1000)
# Check if top 10 words are skipped properly
self.assertNotIn(1, x_train[0]) # Assuming 1 is among top 10
def test_maxlen(self):
# Only consider sequences shorter than 100
(x_train, _), _ = imdb.load_data(maxlen=100)
self.assertTrue(all(len(seq) <= 100 for seq in x_train))
def test_get_word_index(self):
word_index = imdb.get_word_index()
self.assertIsInstance(word_index, dict)
# Check if word_index contains specific known words
self.assertIn("the", word_index)
self.assertIn("and", word_index)
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> Mask:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[bool] = None,
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: Optional[bool] = None,
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
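# Editor's sketch: the tensor-subclass plumbing above in isolation. Creating a
# Mask relies on `_Feature._to_tensor`; the transform methods additionally need
# the `_F` functional dispatcher at runtime, so only construction and the
# `spatial_size` property are exercised here.
if __name__ == "__main__":
    mask = Mask(torch.zeros(2, 10, 12, dtype=torch.uint8))
    assert isinstance(mask, torch.Tensor)
    assert mask.spatial_size == (10, 12)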
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._feature import _Feature, FillTypeJIT
class Mask(_Feature):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> Mask:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: bool = False,
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: bool = False,
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
|
_base_ = './centernet_r18-dcnv2_8xb16-crop512-140e_coco.py'
model = dict(neck=dict(use_dcn=False))
|
_base_ = './centernet_resnet18_dcnv2_140e_coco.py'
model = dict(neck=dict(use_dcn=False))
|
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class SalesforceToolSpec(BaseToolSpec):
"""
Salesforce tool spec.
Gives the agent the ability to interact with Salesforce using simple_salesforce
"""
spec_functions = ["execute_sosl", "execute_soql"]
def __init__(self, **kargs) -> None:
"""Initialize with parameters for Salesforce connection."""
from simple_salesforce import Salesforce
self.sf = Salesforce(**kargs)
def execute_sosl(self, search: str) -> str:
"""
Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the fully formatted SOSL search string, e.g.
`FIND {Waldo}`.
"""
from simple_salesforce import SalesforceError
try:
res = self.sf.search(search)
except SalesforceError as err:
return f"Error running SOSL query: {err}"
return res
def execute_soql(self, query: str) -> str:
"""
Returns the full set of results for the `query`. This is a
convenience wrapper around `query(...)` and `query_more(...)`.
The returned dict is the decoded JSON payload from the final call to
Salesforce, but with the `totalSize` field representing the full
number of results retrieved and the `records` list representing the
full list of records retrieved.
Arguments:
* query -- the SOQL query to send to Salesforce, e.g.
SELECT Id FROM Lead WHERE Email = "waldo@somewhere.com".
"""
from simple_salesforce import SalesforceError
try:
res = self.sf.query_all(query)
except SalesforceError as err:
return f"Error running SOQL query: {err}"
return res
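# Editor's sketch (hypothetical credentials, so left commented): the two spec
# functions wrap simple_salesforce's `search` and `query_all` directly.
#
#     sf_spec = SalesforceToolSpec(
#         username="user@example.com", password="...", security_token="...")
#     print(sf_spec.execute_sosl("FIND {Waldo}"))
#     print(sf_spec.execute_soql("SELECT Id FROM Lead LIMIT 5"))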
|
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class SalesforceToolSpec(BaseToolSpec):
"""Salesforce tool spec.
Gives the agent the ability to interact with Salesforce using simple_salesforce
"""
spec_functions = ["execute_sosl", "execute_soql"]
def __init__(self, **kargs) -> None:
"""Initialize with parameters for Salesforce connection."""
from simple_salesforce import Salesforce
self.sf = Salesforce(**kargs)
def execute_sosl(self, search: str) -> str:
"""Returns the result of a Salesforce search as a dict decoded from
the Salesforce response JSON payload.
Arguments:
* search -- the fully formatted SOSL search string, e.g.
`FIND {Waldo}`.
"""
from simple_salesforce import SalesforceError
try:
res = self.sf.search(search)
except SalesforceError as err:
return f"Error running SOSL query: {err}"
return res
def execute_soql(self, query: str) -> str:
"""Returns the full set of results for the `query`. This is a
convenience wrapper around `query(...)` and `query_more(...)`.
The returned dict is the decoded JSON payload from the final call to
Salesforce, but with the `totalSize` field representing the full
number of results retrieved and the `records` list representing the
full list of records retrieved.
Arguments:
* query -- the SOQL query to send to Salesforce, e.g.
SELECT Id FROM Lead WHERE Email = "waldo@somewhere.com".
"""
from simple_salesforce import SalesforceError
try:
res = self.sf.query_all(query)
except SalesforceError as err:
return f"Error running SOQL query: {err}"
return res
|
import tempfile
import os
import time
from typing import Dict
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'docker-compose.yml')
)
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'docarray_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='module')
def start_storage():
os.system(
f"docker-compose -f {compose_yml} --project-directory . up --build -d "
f"--remove-orphans"
)
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
yield
os.system(
f"docker-compose -f {compose_yml} --project-directory . down "
f"--remove-orphans"
)
@pytest.fixture(scope='session')
def set_env_vars(request):
_old_environ = dict(os.environ)
os.environ.update(request.param)
yield
os.environ.clear()
os.environ.update(_old_environ)
|
import tempfile
import os
import time
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'docker-compose.yml')
)
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'docarray_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='module')
def start_storage():
os.system(
f"docker-compose -f {compose_yml} --project-directory . up --build -d "
f"--remove-orphans"
)
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
yield
os.system(
f"docker-compose -f {compose_yml} --project-directory . down "
f"--remove-orphans"
)
|
"""Base class for Gmail tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.gmail.utils import build_resource_service
if TYPE_CHECKING:
# This is for linting and IDE typehints
from googleapiclient.discovery import Resource
else:
try:
# We do this so pydantic can resolve the types when instantiating
from googleapiclient.discovery import Resource
except ImportError:
pass
class GmailBaseTool(BaseTool):
"""Base class for Gmail tools."""
api_resource: Resource = Field(default_factory=build_resource_service)
@classmethod
def from_api_resource(cls, api_resource: Resource) -> "GmailBaseTool":
"""Create a tool from an api resource.
Args:
api_resource: The api resource to use.
Returns:
A tool.
"""
return cls(service=api_resource) # type: ignore[call-arg]
|
"""Base class for Gmail tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.gmail.utils import build_resource_service
if TYPE_CHECKING:
# This is for linting and IDE typehints
from googleapiclient.discovery import Resource
else:
try:
# We do this so pydantic can resolve the types when instantiating
from googleapiclient.discovery import Resource
except ImportError:
pass
class GmailBaseTool(BaseTool): # type: ignore[override]
"""Base class for Gmail tools."""
api_resource: Resource = Field(default_factory=build_resource_service)
@classmethod
def from_api_resource(cls, api_resource: Resource) -> "GmailBaseTool":
"""Create a tool from an api resource.
Args:
api_resource: The api resource to use.
Returns:
A tool.
"""
return cls(service=api_resource) # type: ignore[call-arg]
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.utils import ConfigType, OptMultiConfig
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (list[int]): Strides of input feature maps.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
@property
def num_inputs(self) -> int:
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg: ConfigType,
featmap_strides: List[int]) -> nn.ModuleList:
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
config RoI layer operation. Options are modules under
``mmcv/ops`` such as ``RoIAlign``.
featmap_strides (list[int]): The stride of input feature map w.r.t
to the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
:obj:`nn.ModuleList`: The RoI extractor modules for each level
feature map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
if isinstance(layer_type, str):
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
else:
layer_cls = layer_type
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor:
"""Scale RoI coordinates by scale factor.
Args:
rois (Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
pass
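# Editor's sketch: a self-contained check of the `roi_rescale` arithmetic
# above, computed without instantiating the (abstract) extractor. Scaling by
# 2.0 doubles the box extent around an unchanged centre.
if __name__ == '__main__':
    rois = torch.tensor([[0., 10., 10., 30., 50.]])  # (batch_idx, x1, y1, x2, y2)
    cx = (rois[:, 1] + rois[:, 3]) * 0.5
    cy = (rois[:, 2] + rois[:, 4]) * 0.5
    w = (rois[:, 3] - rois[:, 1]) * 2.0
    h = (rois[:, 4] - rois[:, 2]) * 2.0
    scaled = torch.stack(
        (rois[:, 0], cx - w * 0.5, cy - h * 0.5, cx + w * 0.5, cy + h * 0.5),
        dim=-1)
    assert torch.allclose(scaled, torch.tensor([[0., 0., -10., 40., 70.]]))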
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.utils import ConfigType, OptMultiConfig
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (list[int]): Strides of input feature maps.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
@property
def num_inputs(self) -> int:
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg: ConfigType,
featmap_strides: List[int]) -> nn.ModuleList:
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
config RoI layer operation. Options are modules under
``mmcv/ops`` such as ``RoIAlign``.
featmap_strides (list[int]): The stride of input feature map w.r.t
to the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
:obj:`nn.ModuleList`: The RoI extractor modules for each level
feature map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor:
"""Scale RoI coordinates by scale factor.
Args:
rois (Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
pass
|
import multiprocessing
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.parsers.helper import _update_gateway_args
if TYPE_CHECKING: # pragma: no cover
from argparse import Namespace
def _get_event(obj) -> multiprocessing.Event:
if isinstance(obj, multiprocessing.Process) or isinstance(
obj, multiprocessing.context.ForkProcess
):
return multiprocessing.Event()
elif isinstance(obj, multiprocessing.context.SpawnProcess):
return multiprocessing.get_context('spawn').Event()
else:
raise TypeError(f'{obj} is not an instance of "multiprocessing.Process"')
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
    that gets triggered when any of the events provided as input is triggered (OR logic)
:param events_list: The list of events that compose this composable event
"""
def __init__(self, events_list):
super().__init__()
self.event = None
self.event = multiprocessing.synchronize.Event(
ctx=multiprocessing.get_context()
)
self.event_list = events_list
for e in events_list:
self._setup(e, self._state_changed)
self._state_changed()
def _state_changed(self):
bools = [e.is_set() for e in self.event_list]
if any(bools):
self.event.set()
else:
self.event.clear()
def _custom_set(self, e):
e._set()
e._state_changed()
def _custom_clear(self, e):
e._clear()
e._state_changed()
def _setup(self, e, changed_callback):
e._set = e.set
e._clear = e.clear
e._state_changed = changed_callback
e.set = partial(self._custom_set, e)
e.clear = partial(self._custom_clear, e)
def update_runtime_cls(args, copy=False) -> 'Namespace':
"""Get runtime_cls as a string from args
:param args: pod/deployment namespace args
:param copy: True if args shouldn't be modified in-place
:return: runtime class as a string
"""
_args = deepcopy(args) if copy else args
if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
_hub_args = deepcopy(_args)
_hub_args.uri = _args.uses
_hub_args.no_usage = True
_args.uses = HubIO(_hub_args).pull()
if hasattr(_args, 'protocol') and _args.pod_role == PodRoleType.GATEWAY:
_update_gateway_args(_args)
if _args.pod_role == PodRoleType.HEAD:
_args.runtime_cls = 'HeadRuntime'
return _args
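# Editor's sketch: exercising ConditionalEvent's OR semantics with two plain
# multiprocessing events (guarded, since this module is normally only imported).
if __name__ == '__main__':
    e1, e2 = multiprocessing.Event(), multiprocessing.Event()
    combined = ConditionalEvent([e1, e2])
    assert not combined.event.is_set()
    e1.set()  # the patched setter re-evaluates the OR and fires `combined`
    assert combined.event.is_set()
    e1.clear()
    assert not combined.event.is_set()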
|
import multiprocessing
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.parsers.helper import _set_gateway_uses
if TYPE_CHECKING: # pragma: no cover
from argparse import Namespace
def _get_event(obj) -> multiprocessing.Event:
if isinstance(obj, multiprocessing.Process) or isinstance(
obj, multiprocessing.context.ForkProcess
):
return multiprocessing.Event()
elif isinstance(obj, multiprocessing.context.SpawnProcess):
return multiprocessing.get_context('spawn').Event()
else:
raise TypeError(f'{obj} is not an instance of "multiprocessing.Process"')
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
    that gets triggered when any of the events provided as input is triggered (OR logic)
:param events_list: The list of events that compose this composable event
"""
def __init__(self, events_list):
super().__init__()
self.event = None
self.event = multiprocessing.synchronize.Event(
ctx=multiprocessing.get_context()
)
self.event_list = events_list
for e in events_list:
self._setup(e, self._state_changed)
self._state_changed()
def _state_changed(self):
bools = [e.is_set() for e in self.event_list]
if any(bools):
self.event.set()
else:
self.event.clear()
def _custom_set(self, e):
e._set()
e._state_changed()
def _custom_clear(self, e):
e._clear()
e._state_changed()
def _setup(self, e, changed_callback):
e._set = e.set
e._clear = e.clear
e._state_changed = changed_callback
e.set = partial(self._custom_set, e)
e.clear = partial(self._custom_clear, e)
def update_runtime_cls(args, copy=False) -> 'Namespace':
"""Get runtime_cls as a string from args
:param args: pod/deployment namespace args
:param copy: True if args shouldn't be modified in-place
:return: runtime class as a string
"""
_args = deepcopy(args) if copy else args
if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
_hub_args = deepcopy(_args)
_hub_args.uri = _args.uses
_hub_args.no_usage = True
_args.uses = HubIO(_hub_args).pull()
if hasattr(_args, 'protocol') and _args.pod_role == PodRoleType.GATEWAY:
_set_gateway_uses(_args)
if _args.pod_role == PodRoleType.HEAD:
_args.runtime_cls = 'HeadRuntime'
return _args
|
"""Scrapfly Web Reader."""
import logging
from typing import List, Optional, Literal
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
logger = logging.getLogger(__file__)
class ScrapflyReader(BasePydanticReader):
"""
    Turn a URL into LLM-accessible markdown with `Scrapfly.io`.
Args:
api_key: The Scrapfly API key.
scrape_config: The Scrapfly ScrapeConfig object.
ignore_scrape_failures: Whether to continue on failures.
urls: List of urls to scrape.
scrape_format: Scrape result format (markdown or text)
For further details, visit: https://scrapfly.io/docs/sdk/python
"""
api_key: str
ignore_scrape_failures: bool = True
scrapfly: Optional["ScrapflyClient"] = None # Declare the scrapfly attribute
def __init__(self, api_key: str, ignore_scrape_failures: bool = True) -> None:
"""Initialize client."""
super().__init__(api_key=api_key, ignore_scrape_failures=ignore_scrape_failures)
try:
from scrapfly import ScrapflyClient
except ImportError:
raise ImportError(
"`scrapfly` package not found, please run `pip install scrapfly-sdk`"
)
self.scrapfly = ScrapflyClient(key=api_key)
@classmethod
def class_name(cls) -> str:
return "Scrapfly_reader"
def load_data(
self,
urls: List[str],
scrape_format: Literal["markdown", "text"] = "markdown",
scrape_config: Optional[dict] = None,
) -> List[Document]:
"""
Load data from the urls.
Args:
            urls (List[str]): List of URLs to scrape.
scrape_config: Optional[dict]: Dictionary of ScrapFly scrape config object.
Returns:
List[Document]: List of documents.
Raises:
ValueError: If URLs aren't provided.
"""
from scrapfly import ScrapeApiResponse, ScrapeConfig
if urls is None:
raise ValueError("URLs must be provided.")
scrape_config = scrape_config if scrape_config is not None else {}
documents = []
for url in urls:
try:
response: ScrapeApiResponse = self.scrapfly.scrape(
ScrapeConfig(url, format=scrape_format, **scrape_config)
)
documents.append(
Document(
text=response.scrape_result["content"], extra_info={"url": url}
)
)
except Exception as e:
if self.ignore_scrape_failures:
logger.error(f"Error fetching data from {url}, exception: {e}")
else:
raise e # noqa: TRY201
return documents
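# Editor's sketch of intended usage (placeholder API key; requires the
# `scrapfly-sdk` package and network access, hence the guard).
if __name__ == "__main__":
    reader = ScrapflyReader(api_key="YOUR-SCRAPFLY-KEY")
    docs = reader.load_data(urls=["https://web-scraping.dev/products"])
    print(docs[0].text[:200])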
|
"""Scrapfly Web Reader."""
import logging
from typing import List, Optional, Literal
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
logger = logging.getLogger(__file__)
class ScrapflyReader(BasePydanticReader):
"""
    Turn a URL into LLM-accessible markdown with `Scrapfly.io`.
Args:
api_key: The Scrapfly API key.
scrape_config: The Scrapfly ScrapeConfig object.
ignore_scrape_failures: Whether to continue on failures.
urls: List of urls to scrape.
scrape_format: Scrape result format (markdown or text)
For further details, visit: https://scrapfly.io/docs/sdk/python
"""
api_key: str
ignore_scrape_failures: bool = True
scrapfly: Optional["ScrapflyClient"] = None # Declare the scrapfly attribute
def __init__(self, api_key: str, ignore_scrape_failures: bool = True) -> None:
"""Initialize client."""
super().__init__(api_key=api_key, ignore_scrape_failures=ignore_scrape_failures)
try:
from scrapfly import ScrapflyClient
except ImportError:
raise ImportError(
"`scrapfly` package not found, please run `pip install scrapfly-sdk`"
)
self.scrapfly = ScrapflyClient(key=api_key)
@classmethod
def class_name(cls) -> str:
return "Scrapfly_reader"
def load_data(
self,
urls: List[str],
scrape_format: Literal["markdown", "text"] = "markdown",
scrape_config: Optional[dict] = None,
) -> List[Document]:
"""
Load data from the urls.
Args:
            urls (List[str]): List of URLs to scrape.
scrape_config: Optional[dict]: Dictionary of ScrapFly scrape config object.
Returns:
List[Document]: List of documents.
Raises:
ValueError: If URLs aren't provided.
"""
from scrapfly import ScrapeApiResponse, ScrapeConfig
if urls is None:
raise ValueError("URLs must be provided.")
scrape_config = scrape_config if scrape_config is not None else {}
documents = []
for url in urls:
try:
response: ScrapeApiResponse = self.scrapfly.scrape(
ScrapeConfig(url, format=scrape_format, **scrape_config)
)
documents.append(
Document(
text=response.scrape_result["content"], extra_info={"url": url}
)
)
except Exception as e:
if self.ignore_scrape_failures:
logger.error(f"Error fetching data from {url}, exception: {e}")
else:
raise e # noqa: TRY201
return documents
|
from docarray.array.mixins.proto import ProtoArrayMixin
|
from .proto import ProtoArrayMixin
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
def parse_llm_compiler_action_args(args: str) -> Union[List, Tuple]:
"""Parse arguments from a string."""
# This will convert the string into a python object
# e.g. '"Ronaldo number of kids"' -> ("Ronaldo number of kids", )
# '"I can answer the question now.", [3]' -> ("I can answer the question now.", [3])
if args == "":
return ()
try:
eval_args: Union[List, Tuple, str] = ast.literal_eval(args)
except Exception:
eval_args = args
if not isinstance(eval_args, list) and not isinstance(eval_args, tuple):
new_args: Union[List, Tuple] = (eval_args,)
else:
new_args = eval_args
return new_args
def _find_tool(tool_name: str, tools: Sequence[BaseTool]) -> BaseTool:
"""
Find a tool by name.
Args:
tool_name: Name of the tool to find.
Returns:
Tool or StructuredTool.
"""
for tool in tools:
if tool.metadata.name == tool_name:
return tool
raise ValueError(f"Tool {tool_name} not found.")
def _get_dependencies_from_graph(idx: int, tool_name: str, args: str) -> List[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
        # depends on all previous steps
dependencies = list(range(1, idx))
else:
# define dependencies based on the dependency rule in tool_definitions.py
dependencies = [i for i in range(1, idx) if default_dependency_rule(i, args)]
return dependencies
def instantiate_new_step(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: str,
thought: str,
) -> LLMCompilerTask:
"""Instantiate a new step."""
dependencies = _get_dependencies_from_graph(idx, tool_name, args)
args_list = parse_llm_compiler_action_args(args)
if tool_name == "join":
# tool: Optional[BaseTool] = None
# assume that the only tool that returns None is join
tool: BaseTool = FunctionTool.from_defaults(fn=lambda x: None)
else:
tool = _find_tool(tool_name, tools)
return LLMCompilerTask(
idx=idx,
name=tool_name,
tool=adapt_to_async_tool(tool),
args=args_list,
dependencies=dependencies,
# TODO: look into adding a stringify rule
# stringify_rule=stringify_rule,
thought=thought,
is_join=tool_name == "join",
)
def get_graph_dict(
parse_results: List[LLMCompilerParseResult],
tools: Sequence[BaseTool],
) -> Dict[int, Any]:
"""Get graph dict."""
graph_dict = {}
for parse_result in parse_results:
# idx = 1, function = "search", args = "Ronaldo number of kids"
# thought will be the preceding thought, if any, otherwise an empty string
# thought, idx, tool_name, args, _ = match
idx = int(parse_result.idx)
task = instantiate_new_step(
tools=tools,
idx=idx,
tool_name=parse_result.tool_name,
args=parse_result.args,
thought=parse_result.thought,
)
graph_dict[idx] = task
if task.is_join:
break
return graph_dict
def generate_context_for_replanner(
tasks: Dict[int, LLMCompilerTask], joiner_thought: str
) -> str:
"""
Generate context for replanning.
Formatted like this.
```
1. action 1
Observation: xxx
2. action 2
Observation: yyy
...
    Thought: joiner_thought
```
"""
previous_plan_and_observations = "\n".join(
[
task.get_thought_action_observation(
include_action=True, include_action_idx=True
)
for task in tasks.values()
if not task.is_join
]
)
joiner_thought = f"Thought: {joiner_thought}"
# use f-string instead
return f"{previous_plan_and_observations}\n\n{joiner_thought}"
def format_contexts(contexts: Sequence[str]) -> str:
"""
Format contexts.
Taken from https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
    `contexts` is a list of context strings.
    Each context is formatted as described in `generate_context_for_replanner`.
"""
formatted_contexts = ""
for context in contexts:
formatted_contexts += f"Previous Plan:\n\n{context}\n\n"
formatted_contexts += "Current Plan:\n\n"
return formatted_contexts
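# Editor's sketch: the "$1"/"${2}" placeholder convention in action, plus the
# argument parser's string-to-tuple behaviour documented above (guarded so the
# checks only run when the module is executed directly).
if __name__ == "__main__":
    assert default_dependency_rule(1, 'search("$1 population")')
    assert not default_dependency_rule(3, 'search("$1 population")')
    assert parse_llm_compiler_action_args('"I can answer now.", [3]') == (
        "I can answer now.",
        [3],
    )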
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
def parse_llm_compiler_action_args(args: str) -> Union[List, Tuple]:
"""Parse arguments from a string."""
# This will convert the string into a python object
# e.g. '"Ronaldo number of kids"' -> ("Ronaldo number of kids", )
# '"I can answer the question now.", [3]' -> ("I can answer the question now.", [3])
if args == "":
return ()
try:
eval_args: Union[List, Tuple, str] = ast.literal_eval(args)
except Exception:
eval_args = args
if not isinstance(eval_args, list) and not isinstance(eval_args, tuple):
new_args: Union[List, Tuple] = (eval_args,)
else:
new_args = eval_args
return new_args
def _find_tool(tool_name: str, tools: Sequence[BaseTool]) -> BaseTool:
"""Find a tool by name.
Args:
tool_name: Name of the tool to find.
Returns:
Tool or StructuredTool.
"""
for tool in tools:
if tool.metadata.name == tool_name:
return tool
raise ValueError(f"Tool {tool_name} not found.")
def _get_dependencies_from_graph(idx: int, tool_name: str, args: str) -> List[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
        # depends on all previous steps
dependencies = list(range(1, idx))
else:
# define dependencies based on the dependency rule in tool_definitions.py
dependencies = [i for i in range(1, idx) if default_dependency_rule(i, args)]
return dependencies
def instantiate_new_step(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: str,
thought: str,
) -> LLMCompilerTask:
"""Instantiate a new step."""
dependencies = _get_dependencies_from_graph(idx, tool_name, args)
args_list = parse_llm_compiler_action_args(args)
if tool_name == "join":
# tool: Optional[BaseTool] = None
# assume that the only tool that returns None is join
tool: BaseTool = FunctionTool.from_defaults(fn=lambda x: None)
else:
tool = _find_tool(tool_name, tools)
return LLMCompilerTask(
idx=idx,
name=tool_name,
tool=adapt_to_async_tool(tool),
args=args_list,
dependencies=dependencies,
# TODO: look into adding a stringify rule
# stringify_rule=stringify_rule,
thought=thought,
is_join=tool_name == "join",
)
def get_graph_dict(
parse_results: List[LLMCompilerParseResult],
tools: Sequence[BaseTool],
) -> Dict[int, Any]:
"""Get graph dict."""
graph_dict = {}
for parse_result in parse_results:
# idx = 1, function = "search", args = "Ronaldo number of kids"
# thought will be the preceding thought, if any, otherwise an empty string
# thought, idx, tool_name, args, _ = match
idx = int(parse_result.idx)
task = instantiate_new_step(
tools=tools,
idx=idx,
tool_name=parse_result.tool_name,
args=parse_result.args,
thought=parse_result.thought,
)
graph_dict[idx] = task
if task.is_join:
break
return graph_dict
def generate_context_for_replanner(
tasks: Dict[int, LLMCompilerTask], joiner_thought: str
) -> str:
"""Generate context for replanning.
Formatted like this.
```
1. action 1
Observation: xxx
2. action 2
Observation: yyy
...
    Thought: joiner_thought
```
"""
previous_plan_and_observations = "\n".join(
[
task.get_thought_action_observation(
include_action=True, include_action_idx=True
)
for task in tasks.values()
if not task.is_join
]
)
joiner_thought = f"Thought: {joiner_thought}"
# use f-string instead
return f"{previous_plan_and_observations}\n\n{joiner_thought}"
def format_contexts(contexts: Sequence[str]) -> str:
"""Format contexts.
Taken from https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
    `contexts` is a list of context strings.
    Each context is formatted as described in `generate_context_for_replanner`.
"""
formatted_contexts = ""
for context in contexts:
formatted_contexts += f"Previous Plan:\n\n{context}\n\n"
formatted_contexts += "Current Plan:\n\n"
return formatted_contexts
|
from __future__ import annotations
import random
import pytest
from datasets import Dataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset):
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
|
import random
import pytest
from datasets import Dataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler
@pytest.fixture
def dummy_dataset():
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
def test_group_by_label_batch_sampler_label_a(dummy_dataset):
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
|
"""Test Base Schema of documents."""
from collections.abc import Iterator
import pytest
from typing_extensions import override
from langchain_core.document_loaders.base import BaseBlobParser, BaseLoader
from langchain_core.documents import Document
from langchain_core.documents.base import Blob
def test_base_blob_parser() -> None:
"""Verify that the eager method is hooked up to the lazy method by default."""
class MyParser(BaseBlobParser):
"""A simple parser that returns a single document."""
@override
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazy parsing interface."""
yield Document(
page_content="foo",
)
parser = MyParser()
assert isinstance(parser.lazy_parse(Blob(data="who?")), Iterator)
# We're verifying that the eager method is hooked up to the lazy method by default.
docs = parser.parse(Blob(data="who?"))
assert len(docs) == 1
assert docs[0].page_content == "foo"
def test_default_lazy_load() -> None:
class FakeLoader(BaseLoader):
def load(self) -> list[Document]:
return [
Document(page_content="foo"),
Document(page_content="bar"),
]
loader = FakeLoader()
docs = list(loader.lazy_load())
assert docs == [Document(page_content="foo"), Document(page_content="bar")]
def test_lazy_load_not_implemented() -> None:
class FakeLoader(BaseLoader):
pass
loader = FakeLoader()
with pytest.raises(NotImplementedError):
loader.lazy_load()
async def test_default_aload() -> None:
class FakeLoader(BaseLoader):
def lazy_load(self) -> Iterator[Document]:
yield from [
Document(page_content="foo"),
Document(page_content="bar"),
]
loader = FakeLoader()
docs = loader.load()
assert docs == [Document(page_content="foo"), Document(page_content="bar")]
assert docs == [doc async for doc in loader.alazy_load()]
assert docs == await loader.aload()
|
"""Test Base Schema of documents."""
from collections.abc import Iterator
import pytest
from langchain_core.document_loaders.base import BaseBlobParser, BaseLoader
from langchain_core.documents import Document
from langchain_core.documents.base import Blob
def test_base_blob_parser() -> None:
"""Verify that the eager method is hooked up to the lazy method by default."""
class MyParser(BaseBlobParser):
"""A simple parser that returns a single document."""
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazy parsing interface."""
yield Document(
page_content="foo",
)
parser = MyParser()
assert isinstance(parser.lazy_parse(Blob(data="who?")), Iterator)
# We're verifying that the eager method is hooked up to the lazy method by default.
docs = parser.parse(Blob(data="who?"))
assert len(docs) == 1
assert docs[0].page_content == "foo"
def test_default_lazy_load() -> None:
class FakeLoader(BaseLoader):
def load(self) -> list[Document]:
return [
Document(page_content="foo"),
Document(page_content="bar"),
]
loader = FakeLoader()
docs = list(loader.lazy_load())
assert docs == [Document(page_content="foo"), Document(page_content="bar")]
def test_lazy_load_not_implemented() -> None:
class FakeLoader(BaseLoader):
pass
loader = FakeLoader()
with pytest.raises(NotImplementedError):
loader.lazy_load()
async def test_default_aload() -> None:
class FakeLoader(BaseLoader):
def lazy_load(self) -> Iterator[Document]:
yield from [
Document(page_content="foo"),
Document(page_content="bar"),
]
loader = FakeLoader()
docs = loader.load()
assert docs == [Document(page_content="foo"), Document(page_content="bar")]
assert docs == [doc async for doc in loader.alazy_load()]
assert docs == await loader.aload()
|
import os
from pathlib import Path
from torchaudio.datasets import librispeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def get_mock_dataset(root_dir):
"""
    root_dir: directory where the mocked dataset is stored
"""
mocked_data = []
dataset_dir = os.path.join(root_dir, librispeech.FOLDER_IN_ARCHIVE, librispeech.URL)
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 16000 # 16kHz
seed = 0
for speaker_id in range(5):
speaker_path = os.path.join(dataset_dir, str(speaker_id))
os.makedirs(speaker_path, exist_ok=True)
for chapter_id in range(3):
chapter_path = os.path.join(speaker_path, str(chapter_id))
os.makedirs(chapter_path, exist_ok=True)
trans_content = []
for utterance_id in range(10):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.wav"
path = os.path.join(chapter_path, filename)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
trans_content.append(f"{speaker_id}-{chapter_id}-{utterance_id:04d} {transcript}")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, chapter_id, utterance_id)
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(chapter_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
class LibriSpeechTestMixin(TempDirMixin):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
@classmethod
def tearDownClass(cls):
# In case of test failure
cls.librispeech_cls._ext_audio = ".flac"
def _test_librispeech(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert chapter_id == self.samples[i][4]
assert utterance_id == self.samples[i][5]
num_samples += 1
assert num_samples == len(self.samples)
self.librispeech_cls._ext_audio = ".flac"
def test_librispeech_str(self):
self.librispeech_cls._ext_audio = ".wav"
dataset = self.librispeech_cls(self.root_dir)
self._test_librispeech(dataset)
def test_librispeech_path(self):
self.librispeech_cls._ext_audio = ".wav"
dataset = self.librispeech_cls(Path(self.root_dir))
self._test_librispeech(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import librispeech
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
)
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def get_mock_dataset(root_dir):
"""
    root_dir: directory where the mocked dataset is stored
"""
mocked_data = []
dataset_dir = os.path.join(root_dir, librispeech.FOLDER_IN_ARCHIVE, librispeech.URL)
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 16000 # 16kHz
seed = 0
for speaker_id in range(5):
speaker_path = os.path.join(dataset_dir, str(speaker_id))
os.makedirs(speaker_path, exist_ok=True)
for chapter_id in range(3):
chapter_path = os.path.join(speaker_path, str(chapter_id))
os.makedirs(chapter_path, exist_ok=True)
trans_content = []
for utterance_id in range(10):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.wav"
path = os.path.join(chapter_path, filename)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
trans_content.append(f"{speaker_id}-{chapter_id}-{utterance_id:04d} {transcript}")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, chapter_id, utterance_id)
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(chapter_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
class LibriSpeechTestMixin(TempDirMixin):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
@classmethod
def tearDownClass(cls):
# In case of test failure
cls.librispeech_cls._ext_audio = ".flac"
def _test_librispeech(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert chapter_id == self.samples[i][4]
assert utterance_id == self.samples[i][5]
num_samples += 1
assert num_samples == len(self.samples)
self.librispeech_cls._ext_audio = ".flac"
def test_librispeech_str(self):
self.librispeech_cls._ext_audio = ".wav"
dataset = self.librispeech_cls(self.root_dir)
self._test_librispeech(dataset)
def test_librispeech_path(self):
self.librispeech_cls._ext_audio = ".wav"
dataset = self.librispeech_cls(Path(self.root_dir))
self._test_librispeech(dataset)
|
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
__all__ = [
"CommaSeparatedListOutputParser",
"ListOutputParser",
"MarkdownListOutputParser",
"NumberedListOutputParser",
]
|
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
__all__ = [
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
]
|
_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = '../common/lsj-200e_coco-instance.py'
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
# model settings
model = dict(
type='SOLO',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOHead',
num_classes=80,
in_channels=256,
stacked_convs=7,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
clip_grad=dict(max_norm=35, norm_type=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../common/lsj_200e_coco_instance.py'
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
# model settings
model = dict(
type='SOLO',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=0,
num_outs=5),
mask_head=dict(
type='SOLOHead',
num_classes=80,
in_channels=256,
stacked_convs=7,
feat_channels=256,
strides=[8, 8, 16, 32, 32],
scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
pos_scale=0.2,
num_grids=[40, 36, 24, 16, 12],
cls_down_index=0,
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
# model training and testing settings
test_cfg=dict(
nms_pre=500,
score_thr=0.1,
mask_thr=0.5,
filter_thr=0.05,
kernel='gaussian', # gaussian/linear
sigma=2.0,
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
clip_grad=dict(max_norm=35, norm_type=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../cascade_rcnn/cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[32, 64, 128, 256],
out_channels=256))
# learning policy
max_epochs = 20
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
|
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
_delete_=True,
type='HRNet',
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w32')),
neck=dict(
_delete_=True,
type='HRFPN',
in_channels=[32, 64, 128, 256],
out_channels=256))
# learning policy
max_epochs = 20
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
|
import os
from typing import TypeVar, Union
T = TypeVar("T")
ListLike = Union[list[T], tuple[T, ...]]
NestedDataStructureLike = Union[T, list[T], dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
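# Illustrative use of the aliases above (hypothetical helpers, for clarity only):
#
#     def first(items: ListLike[int]) -> int:
#         return items[0]
#
#     def read(path: PathLike) -> bytes:
#         with open(path, "rb") as f:
#             return f.read()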
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True)
def disable_implicit_token(monkeypatch):
monkeypatch.setattr("huggingface_hub.constants.HF_HUB_DISABLE_IMPLICIT_TOKEN", True)
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except (ModuleNotFoundError, AttributeError):
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True)
def disable_implicit_token(monkeypatch):
monkeypatch.setattr("huggingface_hub.constants.HF_HUB_DISABLE_IMPLICIT_TOKEN", True)
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
    A PodFactory is a factory class that abstracts Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
        :return: the created BasePod
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
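# Illustrative call site (``args`` would come from Jina's CLI argument parser;
# lifecycle handling around the pod is omitted):
#
#     pod = PodFactory.build_pod(args)  # ContainerPod for docker:// uses, else Pod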
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
    A PodFactory is a factory class that abstracts Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
        :return: the created BasePod
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class InstaBoost:
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
Refer to https://github.com/GothicAi/Instaboost for implementation details.
Args:
action_candidate (tuple): Action candidates. "normal", "horizontal", \
"vertical", "skip" are supported. Default: ('normal', \
'horizontal', 'skip').
action_prob (tuple): Corresponding action probabilities. Should be \
the same length as action_candidate. Default: (1, 0, 0).
scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
dx (int): The maximum x-axis shift will be (instance width) / dx.
Default 15.
dy (int): The maximum y-axis shift will be (instance height) / dy.
Default 15.
theta (tuple): (min rotation degree, max rotation degree). \
Default: (-1, 1).
color_prob (float): Probability of images for color augmentation.
Default 0.5.
        hflag (bool): Whether to use heatmap-guided augmentation. Default False.
aug_ratio (float): Probability of applying this transformation. \
Default 0.5.
"""
def __init__(self,
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError(
'Please run "pip install instaboostfast" '
'to install instaboostfast first for instaboost augmentation.')
self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
scale, dx, dy, theta,
color_prob, hflag)
self.aug_ratio = aug_ratio
def _load_anns(self, results):
labels = results['ann_info']['labels']
masks = results['ann_info']['masks']
bboxes = results['ann_info']['bboxes']
n = len(labels)
anns = []
for i in range(n):
label = labels[i]
bbox = bboxes[i]
mask = masks[i]
x1, y1, x2, y2 = bbox
# assert (x2 - x1) >= 1 and (y2 - y1) >= 1
bbox = [x1, y1, x2 - x1, y2 - y1]
anns.append({
'category_id': label,
'segmentation': mask,
'bbox': bbox
})
return anns
def _parse_anns(self, results, anns, img):
gt_bboxes = []
gt_labels = []
gt_masks_ann = []
for ann in anns:
x1, y1, w, h = ann['bbox']
# TODO: more essential bug need to be fixed in instaboost
if w <= 0 or h <= 0:
continue
bbox = [x1, y1, x1 + w, y1 + h]
gt_bboxes.append(bbox)
gt_labels.append(ann['category_id'])
gt_masks_ann.append(ann['segmentation'])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
results['ann_info']['labels'] = gt_labels
results['ann_info']['bboxes'] = gt_bboxes
results['ann_info']['masks'] = gt_masks_ann
results['img'] = img
return results
def __call__(self, results):
img = results['img']
ori_type = img.dtype
anns = self._load_anns(results)
if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError('Please run "pip install instaboostfast" '
'to install instaboostfast first.')
anns, img = instaboost.get_new_data(
anns, img.astype(np.uint8), self.cfg, background=None)
results = self._parse_anns(results, anns, img.astype(ori_type))
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
return repr_str
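# A sketch of where InstaBoost typically sits in a training pipeline: it reads
# `ann_info`, so it goes between image loading and annotation loading. The
# surrounding transforms are illustrative.
#
#     train_pipeline = [
#         dict(type='LoadImageFromFile'),
#         dict(type='InstaBoost', aug_ratio=0.5),
#         dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
#         dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
#     ]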
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class InstaBoost:
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
Refer to https://github.com/GothicAi/Instaboost for implementation details.
Args:
action_candidate (tuple): Action candidates. "normal", "horizontal", \
"vertical", "skip" are supported. Default: ('normal', \
'horizontal', 'skip').
action_prob (tuple): Corresponding action probabilities. Should be \
the same length as action_candidate. Default: (1, 0, 0).
scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
dx (int): The maximum x-axis shift will be (instance width) / dx.
Default 15.
dy (int): The maximum y-axis shift will be (instance height) / dy.
Default 15.
theta (tuple): (min rotation degree, max rotation degree). \
Default: (-1, 1).
color_prob (float): Probability of images for color augmentation.
Default 0.5.
        hflag (bool): Whether to use heatmap-guided augmentation. Default False.
aug_ratio (float): Probability of applying this transformation. \
Default 0.5.
"""
def __init__(self,
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError(
'Please run "pip install instaboostfast" '
'to install instaboostfast first for instaboost augmentation.')
self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
scale, dx, dy, theta,
color_prob, hflag)
self.aug_ratio = aug_ratio
def _load_anns(self, results):
labels = results['ann_info']['labels']
masks = results['ann_info']['masks']
bboxes = results['ann_info']['bboxes']
n = len(labels)
anns = []
for i in range(n):
label = labels[i]
bbox = bboxes[i]
mask = masks[i]
x1, y1, x2, y2 = bbox
# assert (x2 - x1) >= 1 and (y2 - y1) >= 1
bbox = [x1, y1, x2 - x1, y2 - y1]
anns.append({
'category_id': label,
'segmentation': mask,
'bbox': bbox
})
return anns
def _parse_anns(self, results, anns, img):
gt_bboxes = []
gt_labels = []
gt_masks_ann = []
for ann in anns:
x1, y1, w, h = ann['bbox']
# TODO: more essential bug need to be fixed in instaboost
if w <= 0 or h <= 0:
continue
bbox = [x1, y1, x1 + w, y1 + h]
gt_bboxes.append(bbox)
gt_labels.append(ann['category_id'])
gt_masks_ann.append(ann['segmentation'])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
results['ann_info']['labels'] = gt_labels
results['ann_info']['bboxes'] = gt_bboxes
results['ann_info']['masks'] = gt_masks_ann
results['img'] = img
return results
def __call__(self, results):
img = results['img']
orig_type = img.dtype
anns = self._load_anns(results)
if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError('Please run "pip install instaboostfast" '
'to install instaboostfast first.')
anns, img = instaboost.get_new_data(
anns, img.astype(np.uint8), self.cfg, background=None)
results = self._parse_anns(results, anns, img.astype(orig_type))
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
return repr_str
|
class WorkflowValidationError(Exception):
pass
class WorkflowTimeoutError(Exception):
pass
class WorkflowRuntimeError(Exception):
pass
class WorkflowDone(Exception):
pass
class WorkflowCancelledByUser(Exception):
pass
class WorkflowStepDoesNotExistError(Exception):
pass
class WorkflowConfigurationError(Exception):
pass
class ContextSerdeError(Exception):
pass
|
class WorkflowValidationError(Exception):
pass
class WorkflowTimeoutError(Exception):
pass
class WorkflowRuntimeError(Exception):
pass
class WorkflowDone(Exception):
pass
class WorkflowCancelledByUser(Exception):
pass
class WorkflowStepDoesNotExistError(Exception):
pass
class WorkflowConfigurationError(Exception):
pass
|
from abc import ABC, abstractmethod
import warnings
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING, Union, List, Tuple
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs,
):
self._load_offset2ids()
def _init_subindices(self, *args, **kwargs):
self._subindices = {}
subindex_configs = kwargs.get('subindex_configs', None)
if subindex_configs:
config = asdict(self._config) if getattr(self, '_config', None) else dict()
for name, config_subindex in subindex_configs.items():
config_subindex = (
dict() if config_subindex is None else config_subindex
) # allow None as input
if is_dataclass(config_subindex):
config_subindex = asdict(config_subindex)
config_joined = {**config, **config_subindex}
config_joined = self._ensure_unique_config(
config, config_subindex, config_joined, name
)
self._subindices[name] = self.__class__(config=config_joined)
self._subindices[name].extend(self.traverse_flat(name[1:]))
@abstractmethod
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
"""
Ensures that the subindex configuration is unique, despite it inheriting unpopulated fields from the root config.
:param config_root: The configuration of the root index.
:param config_subindex: The configuration that was explicitly provided by the user for the subindex.
:param config_joined: The configuration that combines root and subindex configs. This is the configuration that will be used for subindex construction.
:param subindex_name: Name (access path) of the subindex
:return: config_joined that is unique compared to config_root
"""
...
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(
self, columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]]
) -> Dict[str, str]:
if columns is None:
return {}
if isinstance(columns, list):
warnings.warn(
'Using "columns" as a List of Tuples will be deprecated soon. Please provide a Dictionary.'
)
columns = {col_desc[0]: col_desc[1] for col_desc in columns}
return columns
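# Illustrative behaviour of `_normalize_columns` (values are made up): the
# legacy list-of-tuples form is converted to a dict and emits a deprecation
# warning, while dict and None inputs pass through normalized.
#
#     self._normalize_columns([('price', 'float')])  # -> {'price': 'float'} (+ warning)
#     self._normalize_columns({'price': 'float'})    # -> {'price': 'float'}
#     self._normalize_columns(None)                  # -> {}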
|
from abc import ABC, abstractmethod
from collections import namedtuple
from dataclasses import is_dataclass, asdict
from typing import Dict, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
TypeMap = namedtuple('TypeMap', ['type', 'converter'])
class BaseBackendMixin(ABC):
TYPE_MAP: Dict[str, TypeMap]
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs,
):
self._load_offset2ids()
def _init_subindices(self, *args, **kwargs):
self._subindices = {}
subindex_configs = kwargs.get('subindex_configs', None)
if subindex_configs:
config = asdict(self._config) if getattr(self, '_config', None) else dict()
for name, config_subindex in subindex_configs.items():
config_subindex = (
dict() if config_subindex is None else config_subindex
) # allow None as input
if is_dataclass(config_subindex):
config_subindex = asdict(config_subindex)
config_joined = {**config, **config_subindex}
config_joined = self._ensure_unique_config(
config, config_subindex, config_joined, name
)
self._subindices[name] = self.__class__(config=config_joined)
self._subindices[name].extend(self.traverse_flat(name[1:]))
@abstractmethod
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
"""
Ensures that the subindex configuration is unique, despite it inheriting unpopulated fields from the root config.
:param config_root: The configuration of the root index.
:param config_subindex: The configuration that was explicitly provided by the user for the subindex.
:param config_joined: The configuration that combines root and subindex configs. This is the configuration that will be used for subindex construction.
:param subindex_name: Name (access path) of the subindex
:return: config_joined that is unique compared to config_root
"""
...
def _get_storage_infos(self) -> Optional[Dict]:
if hasattr(self, '_config') and is_dataclass(self._config):
return {k: str(v) for k, v in asdict(self._config).items()}
def _map_id(self, _id: str) -> str:
return _id
def _map_column(self, value, col_type) -> str:
return self.TYPE_MAP[col_type].converter(value)
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
from docarray.math.ndarray import to_numpy_array
return to_numpy_array(embedding)
def _map_type(self, col_type: str) -> str:
return self.TYPE_MAP[col_type].type
def _normalize_columns(self, columns):
if columns is None:
return []
return columns
|
from ._alignment import forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
forced_align,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
merge_tokens,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
TokenSpan,
)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
]
|
import io
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.io import decode_jpeg, encode_jpeg
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomErase` for details."""
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, tv_tensors.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, tv_tensors.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def jpeg(image: torch.Tensor, quality: int) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.JPEG` for details."""
if torch.jit.is_scripting():
return jpeg_image(image, quality=quality)
_log_api_usage_once(jpeg)
kernel = _get_kernel(jpeg, type(image))
return kernel(image, quality=quality)
@_register_kernel_internal(jpeg, torch.Tensor)
@_register_kernel_internal(jpeg, tv_tensors.Image)
def jpeg_image(image: torch.Tensor, quality: int) -> torch.Tensor:
original_shape = image.shape
image = image.view((-1,) + image.shape[-3:])
if image.shape[0] == 0: # degenerate
return image.reshape(original_shape).clone()
images = []
for i in range(image.shape[0]):
# isinstance checks are needed for torchscript.
encoded_image = encode_jpeg(image[i], quality=quality)
assert isinstance(encoded_image, torch.Tensor)
decoded_image = decode_jpeg(encoded_image)
assert isinstance(decoded_image, torch.Tensor)
images.append(decoded_image)
images = torch.stack(images, dim=0).view(original_shape)
return images
@_register_kernel_internal(jpeg, tv_tensors.Video)
def jpeg_video(video: torch.Tensor, quality: int) -> torch.Tensor:
return jpeg_image(video, quality=quality)
@_register_kernel_internal(jpeg, PIL.Image.Image)
def _jpeg_image_pil(image: PIL.Image.Image, quality: int) -> PIL.Image.Image:
raw_jpeg = io.BytesIO()
image.save(raw_jpeg, format="JPEG", quality=quality)
# we need to copy since PIL.Image.open() will return PIL.JpegImagePlugin.JpegImageFile
# which is a sub-class of PIL.Image.Image. this will fail check_transform() test.
return PIL.Image.open(raw_jpeg).copy()
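# Minimal usage sketch for the functional JPEG kernel (values are illustrative;
# JPEG encoding expects a uint8 CHW tensor):
#
#     img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
#     degraded = jpeg(img, quality=10)  # round-trips through JPEG encode/decode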
|
import io
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.io import decode_jpeg, encode_jpeg
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomErase` for details."""
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, tv_tensors.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, tv_tensors.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def jpeg(image: torch.Tensor, quality: int) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.JPEG` for details."""
if torch.jit.is_scripting():
return jpeg_image(image, quality=quality)
_log_api_usage_once(jpeg)
kernel = _get_kernel(jpeg, type(image))
return kernel(image, quality=quality)
@_register_kernel_internal(jpeg, torch.Tensor)
@_register_kernel_internal(jpeg, tv_tensors.Image)
def jpeg_image(image: torch.Tensor, quality: int) -> torch.Tensor:
original_shape = image.shape
image = image.view((-1,) + image.shape[-3:])
if image.shape[0] == 0: # degenerate
return image.reshape(original_shape).clone()
images = []
for i in range(image.shape[0]):
encoded_image = encode_jpeg(image[i], quality=quality)
assert isinstance(encoded_image, torch.Tensor) # For torchscript
images.append(decode_jpeg(encoded_image))
images = torch.stack(images, dim=0).view(original_shape)
return images
@_register_kernel_internal(jpeg, tv_tensors.Video)
def jpeg_video(video: torch.Tensor, quality: int) -> torch.Tensor:
return jpeg_image(video, quality=quality)
@_register_kernel_internal(jpeg, PIL.Image.Image)
def _jpeg_image_pil(image: PIL.Image.Image, quality: int) -> PIL.Image.Image:
raw_jpeg = io.BytesIO()
image.save(raw_jpeg, format="JPEG", quality=quality)
# we need to copy since PIL.Image.open() will return PIL.JpegImagePlugin.JpegImageFile
# which is a sub-class of PIL.Image.Image. this will fail check_transform() test.
return PIL.Image.open(raw_jpeg).copy()
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from mmdet.core.utils.typing import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOV3(SingleStageDetector):
r"""Implementation of `Yolov3: An incremental improvement
<https://arxiv.org/abs/1804.02767>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of YOLOV3. Default: None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of YOLOV3. Default: None.
        data_preprocessor (:obj:`ConfigDict` or dict, optional):
            Model preprocessing config for processing the input data.
            It usually includes ``to_rgb``, ``pad_size_divisor``,
            ``pad_value``, ``mean`` and ``std``. Defaults to None.
        init_cfg (:obj:`ConfigDict` or dict, optional): The config to control
            the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
from mmdet.core.utils.typing import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOV3(SingleStageDetector):
r"""Implementation of `Yolov3: An incremental improvement
<https://arxiv.org/abs/1804.02767>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of YOLOV3. Default: None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of YOLOV3. Default: None.
        preprocess_cfg (:obj:`ConfigDict` or dict, optional):
            Model preprocessing config for processing the input data.
            It usually includes ``to_rgb``, ``pad_size_divisor``,
            ``pad_value``, ``mean`` and ``std``. Defaults to None.
        init_cfg (:obj:`ConfigDict` or dict, optional): The config to control
            the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch import distributed as dist
from torch.distributed.checkpoint import FileSystemReader, FileSystemWriter, load, save
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel
from torch.distributed.fsdp.wrap import enable_wrap, wrap
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, SkipModel
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
_DISTRIBUTED_STATE_DICT_IMPLS = {
StateDictType.LOCAL_STATE_DICT,
StateDictType.SHARDED_STATE_DICT,
}
class TestDistributedCheckpoint(FSDPTest):
@property
def world_size(self):
if torch.cuda.is_available():
gpu_cnt = torch.cuda.device_count()
if gpu_cnt < 2:
return gpu_cnt
return 2
@skip_if_lt_x_gpu(2)
@with_temp_dir
@parametrize("state_dict_type", _DISTRIBUTED_STATE_DICT_IMPLS)
def test_distributed_checkpoint(self, state_dict_type) -> None:
with enable_wrap(wrapper_cls=FSDP):
torch.manual_seed(100)
model = wrap(SkipModel(double_nest=True))
torch.manual_seed(200)
new_model = wrap(SkipModel(double_nest=True))
with (
FullyShardedDataParallel.summon_full_params(model),
FullyShardedDataParallel.summon_full_params(new_model),
):
params = list(model.parameters())
new_params = list(new_model.parameters())
self.assertNotEqual(params, new_params)
writer = FileSystemWriter(self.temp_dir)
reader = FileSystemReader(self.temp_dir)
with (
FSDP.state_dict_type(model, state_dict_type),
FSDP.state_dict_type(new_model, state_dict_type),
):
state_dict = model.state_dict()
save(state_dict, writer)
with (
FSDP.state_dict_type(model, state_dict_type),
FSDP.state_dict_type(new_model, state_dict_type),
):
state_dict = new_model.state_dict()
load(state_dict, reader)
new_model.load_state_dict(state_dict)
with (
FullyShardedDataParallel.summon_full_params(model),
FullyShardedDataParallel.summon_full_params(new_model),
):
params = list(model.parameters())
new_params = list(new_model.parameters())
self.assertEqual(params, new_params)
# TODO: add resharding test case.
devices = ("cuda", "hpu")
instantiate_device_type_tests(TestDistributedCheckpoint, globals(), only_for=devices)
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["oncall: distributed"]
import sys
import torch
from torch import distributed as dist
from torch.distributed.checkpoint import FileSystemReader, FileSystemWriter, load, save
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel
from torch.distributed.fsdp.wrap import enable_wrap, wrap
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import FSDPTest, SkipModel
from torch.testing._internal.common_utils import (
parametrize,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
_DISTRIBUTED_STATE_DICT_IMPLS = {
StateDictType.LOCAL_STATE_DICT,
StateDictType.SHARDED_STATE_DICT,
}
class TestDistributedCheckpoint(FSDPTest):
@property
def world_size(self):
if torch.cuda.is_available():
gpu_cnt = torch.cuda.device_count()
if gpu_cnt < 2:
return gpu_cnt
return 2
@skip_if_lt_x_gpu(2)
@with_temp_dir
@parametrize("state_dict_type", _DISTRIBUTED_STATE_DICT_IMPLS)
def test_distributed_checkpoint(self, state_dict_type) -> None:
with enable_wrap(wrapper_cls=FSDP):
torch.manual_seed(100)
model = wrap(SkipModel(double_nest=True))
torch.manual_seed(200)
new_model = wrap(SkipModel(double_nest=True))
with FullyShardedDataParallel.summon_full_params(
model
), FullyShardedDataParallel.summon_full_params(new_model):
params = list(model.parameters())
new_params = list(new_model.parameters())
self.assertNotEqual(params, new_params)
writer = FileSystemWriter(self.temp_dir)
reader = FileSystemReader(self.temp_dir)
with FSDP.state_dict_type(model, state_dict_type), FSDP.state_dict_type(
new_model, state_dict_type
):
state_dict = model.state_dict()
save(state_dict, writer)
with FSDP.state_dict_type(model, state_dict_type), FSDP.state_dict_type(
new_model, state_dict_type
):
state_dict = new_model.state_dict()
load(state_dict, reader)
new_model.load_state_dict(state_dict)
with FullyShardedDataParallel.summon_full_params(
model
), FullyShardedDataParallel.summon_full_params(new_model):
params = list(model.parameters())
new_params = list(new_model.parameters())
self.assertEqual(params, new_params)
# TODO: add resharding test case.
devices = ("cuda", "hpu")
instantiate_device_type_tests(TestDistributedCheckpoint, globals(), only_for=devices)
if __name__ == "__main__":
run_tests()
|
"""Awadb reader."""
from typing import Any, List
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class AwadbReader(BaseReader):
"""
Awadb reader.
Retrieves documents through an existing awadb client.
These documents can then be used in a downstream LlamaIndex data structure.
Args:
client (awadb.client): An awadb client.
"""
def __init__(self, client: Any):
"""Initialize with parameters."""
import_err_msg = "`awadb` package not found, please run `pip install awadb`"
try:
            import awadb  # noqa: F401 -- only verifies the package is installed
except ImportError:
raise ImportError(import_err_msg)
self.awadb_client = client
def load_data(
self,
query: np.ndarray,
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""
        Load data from the awadb client.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
results = self.awadb_client.Search(
query,
k,
text_in_page_content=None,
meta_filter=None,
not_include_fields=None,
)
documents = []
for item_detail in results[0]["ResultItems"]:
documents.append(Document(text=item_detail["embedding_text"]))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
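# Usage sketch (the client construction is illustrative; any populated awadb
# client works):
#
#     import awadb
#     client = awadb.Client()
#     reader = AwadbReader(client)
#     docs = reader.load_data(query=np.array([[0.1, 0.2, 0.3]]), k=2)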
|
"""Awadb reader."""
from typing import Any, List
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class AwadbReader(BaseReader):
"""Awadb reader.
Retrieves documents through an existing awadb client.
These documents can then be used in a downstream LlamaIndex data structure.
Args:
client (awadb.client): An awadb client.
"""
def __init__(self, client: Any):
"""Initialize with parameters."""
import_err_msg = "`awadb` package not found, please run `pip install awadb`"
try:
            import awadb  # noqa: F401 -- only verifies the package is installed
except ImportError:
raise ImportError(import_err_msg)
self.awadb_client = client
def load_data(
self,
query: np.ndarray,
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""Load data from Faiss.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
results = self.awadb_client.Search(
query,
k,
text_in_page_content=None,
meta_filter=None,
not_include_fields=None,
)
documents = []
for item_detail in results[0]["ResultItems"]:
documents.append(Document(text=item_detail["embedding_text"]))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
|
import warnings
from abc import ABC
from typing import TYPE_CHECKING, Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
        :param file_path: path to an audio file. If file_path is a string, the file
            is opened by that name; otherwise it is treated as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
if TYPE_CHECKING:
import pydub
else:
pydub = import_library('pydub', raise_error=True)
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = pydub.AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
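# A short usage sketch (the concrete subclass and values are illustrative; any
# AbstractAudioTensor implementation behaves the same way):
#
#     audio = some_doc.tensor  # an AbstractAudioTensor subclass instance
#     audio.save('out.wav', frame_rate=44100)
#     audio.display()  # renders a player only inside a notebook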
|
import warnings
from abc import ABC
from typing import Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
        :param file_path: path to an audio file. If file_path is a string, the file
            is opened by that name; otherwise it is treated as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
from pydub import AudioSegment # type: ignore
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer it from the prefix (LMDB and Memcached are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=4, # simply change this from 2 to 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# Uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=4, # simply change this from 2 to 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/time', time.time() - self.t)
self.t = time.time()
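# A hypothetical registration sketch (not part of this module): with the hook
# registered under HOOKS, a runner config can enable it among its default hooks,
# e.g.:
#   default_hooks = dict(timer=dict(type='IterTimerHook'))
# after which `train/data_time` and `train/time` are written to the message hub.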
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
import numpy as np
import pytest
from keras.src import layers
from keras.src.testing import test_case
class ActivityRegularizationTest(test_case.TestCase):
def test_correctness(self):
layer = layers.ActivityRegularization(l1=0.2, l2=0.3)
layer(2 * np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 4 * 0.3 + 2 * 0.2)
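        # With input x = 2: loss = l1 * |x| + l2 * x**2 = 0.2 * 2 + 0.3 * 4,
        # i.e. the 4 * 0.3 + 2 * 0.2 = 1.6 asserted above.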
@pytest.mark.requires_trainable_backend
def test_activity_regularization_basics(self):
self.run_layer_test(
layers.ActivityRegularization,
{"l1": 0.1, "l2": 0.2},
input_shape=(2, 3),
input_dtype="float32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=True,
assert_built_after_instantiation=True,
)
|
import numpy as np
import pytest
from keras.src import layers
from keras.src.testing import test_case
class ActivityRegularizationTest(test_case.TestCase):
def test_correctness(self):
layer = layers.ActivityRegularization(l1=0.2, l2=0.3)
layer(2 * np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 4 * 0.3 + 2 * 0.2)
@pytest.mark.requires_trainable_backend
def test_activity_regularization_basics(self):
self.run_layer_test(
layers.ActivityRegularization,
{"l1": 0.1, "l2": 0.2},
input_shape=(2, 3),
input_dtype="float32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.data import BaseDataElement
from ..registry.root import METRICS
from .metric import BaseMetric
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
Args:
metrics (dict or BaseMetric or Sequence): The config of metrics.
"""
def __init__(self, metrics: Union[dict, BaseMetric, Sequence]):
self._dataset_meta: Optional[dict] = None
if not isinstance(metrics, Sequence):
metrics = [metrics]
self.metrics: List[BaseMetric] = []
for metric in metrics:
if isinstance(metric, BaseMetric):
self.metrics.append(metric)
elif isinstance(metric, dict):
self.metrics.append(METRICS.build(metric))
else:
raise TypeError('metric should be a dict or a BaseMetric, '
f'but got {metric}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
for metric in self.metrics:
metric.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[dict],
predictions: Sequence[BaseDataElement]):
"""Convert ``BaseDataSample`` to dict and invoke process method of each
metric.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
_data_batch = []
for data in data_batch:
if isinstance(data['data_sample'], BaseDataElement):
_data_batch.append(
dict(
inputs=data['inputs'],
data_sample=data['data_sample'].to_dict()))
else:
_data_batch.append(data)
_predictions = []
for pred in predictions:
if isinstance(pred, BaseDataElement):
_predictions.append(pred.to_dict())
else:
_predictions.append(pred)
for metric in self.metrics:
metric.process(_data_batch, _predictions)
def evaluate(self, size: int) -> dict:
"""Invoke ``evaluate`` method of each metric and collect the metrics
dictionary.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation results of all metrics. The keys are the names
of the metrics, and the values are corresponding results.
"""
metrics = {}
for metric in self.metrics:
_results = metric.evaluate(size)
# Check metric name conflicts
for name in _results.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluation results with the same '
f'metric name {name}. Please make sure all metrics '
'have different prefixes.')
metrics.update(_results)
return metrics
def offline_evaluate(self,
data: Sequence,
predictions: Sequence,
chunk_size: int = 1):
"""Offline evaluate the dumped predictions on the given data .
Args:
data (Sequence): All data of the validation set.
predictions (Sequence): All predictions of the model on the
validation set.
chunk_size (int): The number of data samples and predictions to be
processed in a batch.
"""
# support chunking iterable objects
def get_chunks(seq: Iterator, chunk_size=1):
stop = False
while not stop:
chunk = []
for _ in range(chunk_size):
try:
chunk.append(next(seq))
except StopIteration:
stop = True
break
if chunk:
yield chunk
size = 0
for data_chunk, pred_chunk in zip(
get_chunks(iter(data), chunk_size),
get_chunks(iter(predictions), chunk_size)):
size += len(data_chunk)
self.process(data_chunk, pred_chunk)
return self.evaluate(size)
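# A minimal usage sketch under an assumed metric config (illustrative only;
# `Accuracy` and the batch source are hypothetical):
#
#   evaluator = Evaluator([dict(type='Accuracy')])  # built via the METRICS registry
#   evaluator.dataset_meta = dict(classes=('cat', 'dog'))
#   for data_batch, predictions in iterate_val_batches():  # hypothetical helper
#       evaluator.process(data_batch, predictions)
#   results = evaluator.evaluate(size=len(val_dataset))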
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Iterator, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from ..registry.root import METRICS
from .metric import BaseMetric
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
Args:
metrics (dict or BaseMetric or Sequence): The config of metrics.
"""
def __init__(self, metrics: Union[dict, BaseMetric, Sequence]):
self._dataset_meta: Optional[dict] = None
if not isinstance(metrics, Sequence):
metrics = [metrics]
self.metrics: List[BaseMetric] = []
for metric in metrics:
if isinstance(metric, BaseMetric):
self.metrics.append(metric)
elif isinstance(metric, dict):
self.metrics.append(METRICS.build(metric))
else:
raise TypeError('metric should be a dict or a BaseMetric, '
f'but got {metric}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
for metric in self.metrics:
metric.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[Tuple[Any, BaseDataElement]],
predictions: Sequence[BaseDataElement]):
"""Convert ``BaseDataSample`` to dict and invoke process method of each
metric.
Args:
data_batch (Sequence[Tuple[Any, BaseDataElement]]): A batch of data
from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
_data_batch = []
for input, data in data_batch:
if isinstance(data, BaseDataElement):
_data_batch.append((input, data.to_dict()))
else:
_data_batch.append((input, data))
_predictions = []
for pred in predictions:
if isinstance(pred, BaseDataElement):
_predictions.append(pred.to_dict())
else:
_predictions.append(pred)
for metric in self.metrics:
metric.process(_data_batch, _predictions)
def evaluate(self, size: int) -> dict:
"""Invoke ``evaluate`` method of each metric and collect the metrics
dictionary.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation results of all metrics. The keys are the names
of the metrics, and the values are corresponding results.
"""
metrics = {}
for metric in self.metrics:
_results = metric.evaluate(size)
# Check metric name conflicts
for name in _results.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluation results with the same '
f'metric name {name}. Please make sure all metrics '
'have different prefixes.')
metrics.update(_results)
return metrics
def offline_evaluate(self,
data: Sequence,
predictions: Sequence,
chunk_size: int = 1):
"""Offline evaluate the dumped predictions on the given data .
Args:
data (Sequence): All data of the validation set.
predictions (Sequence): All predictions of the model on the
validation set.
chunk_size (int): The number of data samples and predictions to be
processed in a batch.
"""
# support chunking iterable objects
def get_chunks(seq: Iterator, chunk_size=1):
stop = False
while not stop:
chunk = []
for _ in range(chunk_size):
try:
chunk.append(next(seq))
except StopIteration:
stop = True
break
if chunk:
yield chunk
size = 0
for data_chunk, pred_chunk in zip(
get_chunks(iter(data), chunk_size),
get_chunks(iter(predictions), chunk_size)):
size += len(data_chunk)
self.process(data_chunk, pred_chunk)
return self.evaluate(size)
|
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> tv_tensors.Image:
"""[BETA] See :class:`~torchvision.transforms.v2.ToImage` for details."""
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
return tv_tensors.Image(output)
to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
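# A small usage sketch (assumes an HxWxC uint8 array; not part of the module):
#
#   import numpy as np
#   arr = np.zeros((32, 32, 3), dtype=np.uint8)
#   img = to_image(arr)  # tv_tensors.Image of shape (3, 32, 32)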
|
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> datapoints.Image:
"""[BETA] See :class:`~torchvision.transforms.v2.ToImage` for details."""
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
return datapoints.Image(output)
to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
|
import httpx
from typing import Any, Dict, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
)
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.openai import OpenAIEmbedding
class OPEAEmbedding(OpenAIEmbedding):
"""
OPEA class for embeddings.
Args:
        model_name (str): Name of the model to use for embeddings.
api_base (str): The base URL for OPEA Embeddings microservice.
additional_kwargs (Dict[str, Any]): Additional kwargs for the OpenAI API.
Examples:
`pip install llama-index-embeddings-opea`
```python
from llama_index.embeddings.opea import OPEAEmbedding
embed_model = OPEAEmbedding(
model_name="...",
api_base="http://localhost:8080",
)
```
"""
def __init__(
self,
model_name: str,
api_base: str,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
api_key: Optional[str] = "fake",
**kwargs: Any,
) -> None:
super().__init__(
model_name=model_name,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "OPEAEmbedding"
|
import httpx
from typing import Any, Dict, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
)
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.openai import OpenAIEmbedding
class OPEAEmbedding(OpenAIEmbedding):
"""
OPEA class for embeddings.
Args:
        model_name (str): Name of the model to use for embeddings.
api_base (str): The base URL for OPEA Embeddings microservice.
additional_kwargs (Dict[str, Any]): Additional kwargs for the OpenAI API.
Examples:
`pip install llama-index-embeddings-opea`
```python
from llama_index.embeddings.opea import OPEAEmbedding
embed_model = OPEAEmbedding(
model_name="...",
api_base="http://localhost:8080",
)
```
"""
def __init__(
self,
model_name: str,
api_base: str,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
api_key: Optional[str] = "fake",
**kwargs: Any,
) -> None:
super().__init__(
model_name=model_name,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "OPEAEmbedding"
|
from typing import Any, Dict, Union
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. v2betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or datapoints.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (datapoints.BoundingBoxes,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBoxes, params: Dict[str, Any]) -> datapoints.BoundingBoxes:
return F.convert_format_bounding_boxes(inpt, new_format=self.format) # type: ignore[return-value]
class ClampBoundingBoxes(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.
.. v2betastatus:: ClampBoundingBoxes transform
"""
_transformed_types = (datapoints.BoundingBoxes,)
def _transform(self, inpt: datapoints.BoundingBoxes, params: Dict[str, Any]) -> datapoints.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
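# An illustrative sketch (assumed box values and metadata; not part of the module):
#
#   import torch
#   boxes = datapoints.BoundingBoxes(
#       torch.tensor([[10.0, 10.0, 20.0, 20.0]]),
#       format=datapoints.BoundingBoxFormat.XYWH,
#       spatial_size=(100, 100),
#   )
#   boxes = ConvertBoundingBoxFormat("XYXY")(boxes)  # -> [[10., 10., 30., 30.]]
#   boxes = ClampBoundingBoxes()(boxes)              # clamps to spatial_size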
|
from typing import Any, Dict, Union
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. v2betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or datapoints.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ClampBoundingBox(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.
.. v2betastatus:: ClampBoundingBox transform
"""
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_base_runtime_parser(arg_group):
"""Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
"""
arg_group.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
help='The port for input data to bind to, default a random port between [49152, 65535]',
)
arg_group.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for binding to, by default it is {__default_host__}',
)
arg_group.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
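# A hypothetical CLI sketch for the options added above (runtime name assumed):
#
#   my-worker-runtime --port-in 51234 --host-in 0.0.0.0 \
#       --grpc-server-options 'grpc.max_send_message_length: -1'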
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_base_runtime_parser(arg_group):
"""Mixing in arguments required by any class that extends :class:`AsynNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
"""
arg_group.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
help='The port for input data to bind to, default a random port between [49152, 65535]',
)
arg_group.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for binding to, by default it is {__default_host__}',
)
arg_group.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
arg_group.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
arg_group.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
arg_group.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
|
from typing import List, Optional
import numpy as np
import pytest
from docarray import DocList
from docarray.base_doc.doc import BaseDoc
from docarray.typing import NdArray
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
def test_nested_to_json(nested_docs):
d = nested_docs.json()
nested_docs.__class__.parse_raw(d)
@pytest.fixture
def nested_none_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: Optional[DocList[SimpleDoc]]
hello: str = 'world'
nested_docs = NestedDoc()
return nested_docs
def test_nested_none_to_dict(nested_none_docs):
d = nested_none_docs.dict()
assert d == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
def test_nested_none_to_json(nested_none_docs):
d = nested_none_docs.json()
d = nested_none_docs.__class__.parse_raw(d)
assert d.dict() == {'docs': None, 'hello': 'world', 'id': nested_none_docs.id}
|
from typing import List, Optional
import numpy as np
import pytest
from docarray import DocList
from docarray.base_doc.doc import BaseDoc
from docarray.typing import NdArray
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
def test_nested_to_json(nested_docs):
d = nested_docs.json()
nested_docs.__class__.parse_raw(d)
|
from typing import Any, ForwardRef, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
    Note that if the origin of 'x' is a list, tuple, dict or set, the function immediately returns 'False'.
"""
    if (
        (get_origin(x) in (list, tuple, dict, set))
        or is_typevar(x)
        or (type(x) == ForwardRef)
        or x == ID
    ):
return False
return issubclass(x, a_tuple)
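# Illustrative behaviour (assumed inputs; not part of the module):
#
#   from typing import List
#   safe_issubclass(list, object)       # True: plain classes behave as usual
#   safe_issubclass(List[int], object)  # False: parametrized generics short-circuit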
|
from typing import Any, ForwardRef, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
    Note that if the origin of 'x' is a list, tuple, dict or set, the function immediately returns 'False'.
"""
if (
(get_origin(x) in (list, tuple, dict, set))
or is_typevar(x)
or (type(x) == ForwardRef)
):
return False
return issubclass(x, a_tuple)
|
__version__ = '0.38.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.38.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
    Can be a remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
        be converted into a protobuf.
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text_url=str(self))
def load_to_bytes(self, timeout: Optional[float] = None) -> bytes:
"""
Load the text file into a bytes object.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt_bytes = doc.remote_url.load_to_bytes()
local_txt_bytes = doc.local_url.load_to_bytes()
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: the text file content as bytes
"""
return _uri_to_blob(self, timeout=timeout)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = _uri_to_blob(self, timeout=timeout)
return _bytes.decode(charset)
|
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
class TextUrl(AnyUrl):
"""
URL to a text file.
    Can be a remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
        be converted into a protobuf.
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text_url=str(self))
def load_to_bytes(self, timeout: Optional[float] = None) -> bytes:
"""
Load the text file into a bytes object.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt_bytes = doc.remote_url.load_to_bytes()
local_txt_bytes = doc.local_url.load_to_bytes()
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: the text file content as bytes
"""
return _uri_to_blob(self, timeout=timeout)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = _uri_to_blob(self, timeout=timeout)
return _bytes.decode(charset)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path).to(device).eval()
self.traversal_paths = traversal_paths
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that defines the `traversal_paths`.
"""
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
        # traverse through all documents which have to be processed
flat_docs = docs.traverse_flat(traversal_paths)
# filter out documents without audio wav
filtered_docs = DocumentArray(
[doc for doc in flat_docs if doc.blob is not None]
)
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
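# A hypothetical call sketch (assumes the pretrained checkpoint is available and
# the blob is a mono waveform with its sample rate in `tags`):
#
#   import numpy as np
#   from jina import Document, DocumentArray
#   doc = Document(blob=np.random.randn(22050), tags={'sample_rate': 22050})
#   AudioCLIPEncoder().encode(DocumentArray([doc]))  # resamples to 44100 Hz, sets embedding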
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path).to(device).eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
        # traverse through all documents which have to be processed
flat_docs = docs.traverse_flat(traversal_paths)
# filter out documents without audio wav
filtered_docs = DocumentArray(
[doc for doc in flat_docs if doc.blob is not None]
)
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from ..utils.deprecation_utils import deprecated
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
@deprecated(
"This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
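# A small illustrative check (local filesystem; not part of the module):
#
#   fs = fsspec.filesystem("file")
#   assert not is_remote_filesystem(fs)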
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
    Validates whether the filesystem uses a remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None:
protocols = (p,) if isinstance(p := fs.protocol, str) else p
if "file" not in protocols:
return True
return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
is_local = not is_remote_filesystem(fs)
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
from docarray.typing import AnyEmbedding, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDoc):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray of shape `(n_samples, 3)` and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain:
- a [`PointCloud3DUrl`][docarray.typing.url.PointCloud3DUrl] (`PointCloud3D.url`)
- a [`PointsAndColors`][docarray.documents.point_cloud.points_and_colors.PointsAndColors] object (`PointCloud3D.tensors`)
- an [`AnyEmbedding`](../../../../api_references/typing/tensor/embedding) (`PointCloud3D.embedding`)
- a `bytes` object (`PointCloud3D.bytes_`)
You can use this Document directly:
```python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
# model = MyEmbeddingModel()
# pc.embedding = model(pc.tensors.points)
```
You can extend this Document:
```python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
# model = MyEmbeddingModel()
# pc.embedding = model(pc.tensors.points)
# pc.second_embedding = model(pc.tensors.colors)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import PointCloud3D, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
point_cloud: PointCloud3D
text: TextDoc
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensors = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes_ = mmdoc.point_cloud.url.load_bytes()
```
You can display your point cloud from either its url, or its tensors:
```python
from docarray.documents import PointCloud3D
# display from url
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
# pc.url.display()
# display from tensors
pc.tensors = pc.url.load(samples=10000)
# pc.tensors.display()
```
"""
url: Optional[PointCloud3DUrl] = Field(
description='URL to a file containing point cloud information. Can be remote (web) URL, or a local file path.',
example='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj',
default=None,
)
tensors: Optional[PointsAndColors] = Field(
description='A tensor object of 3D point cloud of type `PointsAndColors`.',
example=[[0, 0, 1], [1, 0, 1], [0, 1, 1]],
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of 3D point cloud.',
example=[1, 1, 1],
default=None,
)
bytes_: Optional[bytes] = Field(
description='Bytes representation of 3D point cloud.',
default=None,
)
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensors=PointsAndColors(points=value))
return super().validate(value)
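# The custom `validate` above lets plain strings and raw tensors coerce into a
# PointCloud3D; a brief sketch (hypothetical values):
#
#   pc = PointCloud3D.validate('https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')  # -> url set
#   pc = PointCloud3D.validate(np.zeros((100, 3)))  # -> tensors.points set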
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
from docarray.typing import AnyEmbedding, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDoc):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray of shape `(n_samples, 3)` and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain:
- a [`PointCloud3DUrl`][docarray.typing.url.PointCloud3DUrl] (`PointCloud3D.url`)
- a [`PointsAndColors`][docarray.documents.point_cloud.points_and_colors.PointsAndColors] object (`PointCloud3D.tensors`)
- an [`AnyEmbedding`](../../../../api_references/typing/tensor/embedding) (`PointCloud3D.embedding`)
- a `bytes` object (`PointCloud3D.bytes_`)
You can use this Document directly:
```python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
# model = MyEmbeddingModel()
# pc.embedding = model(pc.tensors.points)
```
You can extend this Document:
```python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
# model = MyEmbeddingModel()
# pc.embedding = model(pc.tensors.points)
# pc.second_embedding = model(pc.tensors.colors)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import PointCloud3D, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
point_cloud: PointCloud3D
text: TextDoc
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensors = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes_ = mmdoc.point_cloud.url.load_bytes()
```
You can display your point cloud from either its url, or its tensors:
```python
from docarray.documents import PointCloud3D
# display from url
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
# pc.url.display()
# display from tensors
pc.tensors = pc.url.load(samples=10000)
# pc.tensors.display()
```
"""
url: Optional[PointCloud3DUrl] = None
tensors: Optional[PointsAndColors] = None
embedding: Optional[AnyEmbedding] = None
bytes_: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensors=PointsAndColors(points=value))
return super().validate(value)
|
import os
import time
import pytest
from jina import Flow
from tests.integration.instrumentation import (
get_exported_jobs,
get_flow_metric_labels,
get_services,
)
def test_docker_instrumentation(
jaeger_port,
otlp_collector,
otlp_receiver_port,
docker_image_name,
docker_image_built,
prometheus_client,
expected_flow_metric_labels,
):
f = Flow(
tracing=True,
traces_exporter_host='http://localhost',
traces_exporter_port=otlp_receiver_port,
metrics=True,
metrics_exporter_host='http://localhost',
metrics_exporter_port=otlp_receiver_port,
).add(uses=f'docker://{docker_image_name}')
with f:
from jina import DocumentArray
        f.post('/search', DocumentArray.empty(), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(3)
services = get_services(jaeger_port)
assert set(services) == {'executor0/rep-0', 'gateway/rep-0'}
exported_jobs = get_exported_jobs(prometheus_client)
assert exported_jobs == {
'gateway/rep-0',
'executor0/rep-0',
}
flow_metric_labels = get_flow_metric_labels(prometheus_client)
assert flow_metric_labels.issubset(expected_flow_metric_labels)
|
import os
import time
import pytest
from jina import Flow
from tests.integration.instrumentation import (
get_exported_jobs,
get_flow_metric_labels,
get_services,
)
def test_docker_instrumentation(
jaeger_port,
otlp_collector,
otlp_receiver_port,
docker_image_name,
docker_image_built,
prometheus_client,
expected_flow_metric_labels,
):
f = Flow(
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=otlp_receiver_port,
metrics=True,
metrics_exporter_host='localhost',
metrics_exporter_port=otlp_receiver_port,
).add(uses=f'docker://{docker_image_name}')
with f:
from jina import DocumentArray
        f.post('/search', DocumentArray.empty(), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(3)
services = get_services(jaeger_port)
assert set(services) == {'executor0/rep-0', 'gateway/rep-0'}
exported_jobs = get_exported_jobs(prometheus_client)
assert exported_jobs == {
'gateway/rep-0',
'executor0/rep-0',
}
flow_metric_labels = get_flow_metric_labels(prometheus_client)
assert flow_metric_labels.issubset(expected_flow_metric_labels)
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._tv_tensor import TVTensor
class Video(TVTensor):
""":class:`torch.Tensor` subclass for videos with shape ``[..., T, C, H, W]``.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(f"Expected at least 4 dimensions [..., T, C, H, W], but got {tensor.ndim}")
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
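# A minimal usage sketch: wrapping a ``[T, C, H, W]`` tensor as a ``Video``.
_frames = torch.rand(8, 3, 224, 224)  # 8 RGB frames of size 224x224
_video = Video(_frames)
assert isinstance(_video, torch.Tensor) and _video.ndim == 4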
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._tv_tensor import TVTensor
class Video(TVTensor):
""":class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(f"Expected at least 4 dimensions [..., T, C, H, W], but got {tensor.ndim}")
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
|
_base_ = './solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
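# For reference, a sketch of the per-epoch LR factor produced by the
# ``MultiStepLR`` schedule above (multiply by ``gamma`` at each milestone):
# factor(epoch) = gamma ** sum(epoch >= m for m in [27, 33])
# -> epochs 0-26: 1.0, epochs 27-32: 0.1, epochs 33-35: 0.01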
|
_base_ = './solov2_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
(1333, 672), (1333, 640)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
:param as_chunks: when multiple geometry stored in one mesh file,
then store each geometry into different :attr:`.chunks`
:return: itself after processed
"""
import trimesh
import urllib.parse
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
if as_chunks:
from docarray.document import Document
# try to coerce everything into a scene
scene = loader(self.uri, force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = loader(self.uri, force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
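# A minimal usage sketch (``model.obj`` is a hypothetical local mesh file):
# from docarray import Document
# doc = Document(uri='model.obj')
# doc.load_uri_to_point_cloud_tensor(samples=1000)  # doc.tensor -> (1000, 3) array
# doc.load_uri_to_point_cloud_tensor(samples=1000, as_chunks=True)  # one chunk per geometry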
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
:param as_chunks: when multiple geometry stored in one mesh file,
then store each geometry into different :attr:`.chunks`
:return: itself after processed
"""
import trimesh
import urllib.parse
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
if as_chunks:
from .. import Document
# try to coerce everything into a scene
scene = loader(self.uri, force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = loader(self.uri, force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
|
"""A simple progress bar for the console."""
import threading
from collections.abc import Sequence
from typing import Any, Optional
from uuid import UUID
from langchain_core.callbacks import base as base_callbacks
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
class ProgressBarCallback(base_callbacks.BaseCallbackHandler):
"""A simple progress bar for the console."""
def __init__(self, total: int, ncols: int = 50, **kwargs: Any):
"""Initialize the progress bar.
Args:
total: int, the total number of items to be processed.
ncols: int, the character width of the progress bar.
"""
self.total = total
self.ncols = ncols
self.counter = 0
self.lock = threading.Lock()
self._print_bar()
def increment(self) -> None:
"""Increment the counter and update the progress bar."""
with self.lock:
self.counter += 1
self._print_bar()
def _print_bar(self) -> None:
"""Print the progress bar to the console."""
progress = self.counter / self.total
arrow = "-" * int(round(progress * self.ncols) - 1) + ">"
spaces = " " * (self.ncols - len(arrow))
print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end="") # noqa: T201
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_chain_end(
self,
outputs: dict[str, Any],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_end(
self,
output: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
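# A minimal usage sketch, assuming a top-level runnable: each finished
# top-level run (``parent_run_id is None``) advances the bar by one.
if __name__ == "__main__":
    from langchain_core.runnables import RunnableLambda
    bar = ProgressBarCallback(total=3)
    double = RunnableLambda(lambda x: x * 2)
    for item in (1, 2, 3):
        double.invoke(item, config={"callbacks": [bar]})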
|
"""A simple progress bar for the console."""
import threading
from typing import Any, Dict, Optional, Sequence
from uuid import UUID
from langchain_core.callbacks import base as base_callbacks
from langchain_core.documents import Document
from langchain_core.outputs import LLMResult
class ProgressBarCallback(base_callbacks.BaseCallbackHandler):
"""A simple progress bar for the console."""
def __init__(self, total: int, ncols: int = 50, **kwargs: Any):
"""Initialize the progress bar.
Args:
total: int, the total number of items to be processed.
ncols: int, the character width of the progress bar.
"""
self.total = total
self.ncols = ncols
self.counter = 0
self.lock = threading.Lock()
self._print_bar()
def increment(self) -> None:
"""Increment the counter and update the progress bar."""
with self.lock:
self.counter += 1
self._print_bar()
def _print_bar(self) -> None:
"""Print the progress bar to the console."""
progress = self.counter / self.total
arrow = "-" * int(round(progress * self.ncols) - 1) + ">"
spaces = " " * (self.ncols - len(arrow))
print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end="") # noqa: T201
def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_chain_end(
self,
outputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_retriever_end(
self,
documents: Sequence[Document],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
def on_tool_end(
self,
output: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if parent_run_id is None:
self.increment()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
from keras.src.applications.mobilenet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet_v2 import (
preprocess_input as preprocess_input,
)
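# A minimal usage sketch of the re-exported API (left commented out because it
# downloads ImageNet weights on first use):
# import numpy as np
# model = MobileNetV2(weights="imagenet")
# x = preprocess_input(np.random.uniform(0, 255, (1, 224, 224, 3)))
# print(decode_predictions(model.predict(x), top=3))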
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v2 import MobileNetV2
from keras.src.applications.mobilenet_v2 import decode_predictions
from keras.src.applications.mobilenet_v2 import preprocess_input
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import DETRHead
def test_detr_head_loss():
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3),
'batch_input_shape': (s, s)
}]
config = ConfigDict(
dict(
type='DETRHead',
num_classes=80,
in_channels=200,
transformer=dict(
type='Transformer',
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm')),
)),
positional_encoding=dict(
type='SinePositionalEncoding', num_feats=128, normalize=True),
loss_cls=dict(
type='CrossEntropyLoss',
bg_cls_weight=0.1,
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0)))
self = DETRHead(**config)
self.init_weights()
feat = [torch.rand(1, 200, 10, 10)]
cls_scores, bbox_preds = self.forward(feat, img_metas)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
assert loss.item() > 0, 'cls loss should be non-zero'
elif 'bbox' in key:
assert loss.item(
) == 0, 'there should be no box loss when there are no true boxes'
elif 'iou' in key:
assert loss.item(
) == 0, 'there should be no iou loss when there are no true boxes'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
for loss in one_gt_losses.values():
assert loss.item(
) > 0, 'cls loss, or box loss, or iou loss should be non-zero'
# test forward_train
self.forward_train(feat, img_metas, gt_bboxes, gt_labels)
# test inference mode
self.get_bboxes(cls_scores, bbox_preds, img_metas, rescale=True)
|
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import DETRHead
def test_detr_head_loss():
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3),
'batch_input_shape': (s, s)
}]
config = ConfigDict(
dict(
type='DETRHead',
num_classes=80,
in_channels=200,
transformer=dict(
type='Transformer',
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm')),
)),
positional_encoding=dict(
type='SinePositionalEncoding', num_feats=128, normalize=True),
loss_cls=dict(
type='CrossEntropyLoss',
bg_cls_weight=0.1,
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0)))
self = DETRHead(**config)
self.init_weights()
feat = [torch.rand(1, 200, 10, 10)]
cls_scores, bbox_preds = self.forward(feat, img_metas)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
assert loss.item() > 0, 'cls loss should be non-zero'
elif 'bbox' in key:
assert loss.item(
) == 0, 'there should be no box loss when there are no true boxes'
elif 'iou' in key:
assert loss.item(
) == 0, 'there should be no iou loss when there are no true boxes'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
for loss in one_gt_losses.values():
assert loss.item(
) > 0, 'cls loss, or box loss, or iou loss should be non-zero'
# test forward_train
self.forward_train(feat, img_metas, gt_bboxes, gt_labels)
# test inference mode
self.get_bboxes(cls_scores, bbox_preds, img_metas, rescale=True)
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.augmentation_accuracy_metric import (
AugmentationAccuracyMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AugmentationAccuracyEvaluator(BaseEvaluator):
"""
Tonic Validate's augmentation accuracy metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AugmentationAccuracyMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
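# A minimal usage sketch, assuming OPENAI_API_KEY is set in the environment
# (left commented out because it calls the OpenAI API):
# import asyncio
# evaluator = AugmentationAccuracyEvaluator()
# result = asyncio.run(
#     evaluator.aevaluate(
#         query="What is the capital of France?",
#         response="Paris is the capital of France.",
#         contexts=["Paris is the capital and most populous city of France."],
#     )
# )
# print(result.score)  # float in [0.0, 1.0]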
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.augmentation_accuracy_metric import (
AugmentationAccuracyMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AugmentationAccuracyEvaluator(BaseEvaluator):
"""Tonic Validate's augmentation accuracy metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AugmentationAccuracyMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`~sentence_transformers.evaluation.TranslationEvaluator` but is specifically designed for sparse encoder models.
Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3...) and (fr_1, fr_2, fr_3, ...),
and assuming that fr_i is the translation of en_i.
    Checks if vec(en_i) has the highest similarity to vec(fr_i). Computes the accuracy in both directions.
The labels need to indicate the similarity between the sentences.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
max_active_dims (Optional[int], optional): The maximum number of active dimensions to use.
`None` uses the model's current `max_active_dims`. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
            # Load a model (not multilingual, but multilingual sparse models will hopefully appear on the Hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
max_active_dims: int | None = None,
):
self.max_active_dims = max_active_dims
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=None,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_to_cpu=True,
max_active_dims=self.max_active_dims,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`~sentence_transformers.evaluation.TranslationEvaluator` but is specifically designed for sparse encoder models.
Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3...) and (fr_1, fr_2, fr_3, ...),
and assuming that fr_i is the translation of en_i.
    Checks if vec(en_i) has the highest similarity to vec(fr_i). Computes the accuracy in both directions.
The labels need to indicate the similarity between the sentences.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
            # Load a model (not multilingual, but multilingual sparse models will hopefully appear on the Hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_to_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import AutoencoderKLCosmos
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLCosmosTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderKLCosmos
main_input_name = "sample"
base_precision = 1e-2
def get_autoencoder_kl_cosmos_config(self):
return {
"in_channels": 3,
"out_channels": 3,
"latent_channels": 4,
"encoder_block_out_channels": (8, 8, 8, 8),
"decode_block_out_channels": (8, 8, 8, 8),
"attention_resolutions": (8,),
"resolution": 64,
"num_layers": 2,
"patch_size": 4,
"patch_type": "haar",
"scaling_factor": 1.0,
"spatial_compression_ratio": 4,
"temporal_compression_ratio": 4,
}
@property
def dummy_input(self):
batch_size = 2
num_frames = 9
num_channels = 3
height = 32
width = 32
image = floats_tensor((batch_size, num_channels, num_frames, height, width)).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 9, 32, 32)
@property
def output_shape(self):
return (3, 9, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = self.get_autoencoder_kl_cosmos_config()
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {
"CosmosEncoder3d",
"CosmosDecoder3d",
}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@unittest.skip("Not sure why this test fails. Investigate later.")
def test_effective_gradient_checkpointing(self):
pass
@unittest.skip("Unsupported test.")
def test_forward_with_norm_groups(self):
pass
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import AutoencoderKLCosmos
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLCosmosTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = AutoencoderKLCosmos
main_input_name = "sample"
base_precision = 1e-2
def get_autoencoder_kl_cosmos_config(self):
return {
"in_channels": 3,
"out_channels": 3,
"latent_channels": 4,
"encoder_block_out_channels": (8, 8, 8, 8),
"decode_block_out_channels": (8, 8, 8, 8),
"attention_resolutions": (8,),
"resolution": 64,
"num_layers": 2,
"patch_size": 4,
"patch_type": "haar",
"scaling_factor": 1.0,
"spatial_compression_ratio": 4,
"temporal_compression_ratio": 4,
}
@property
def dummy_input(self):
batch_size = 2
num_frames = 9
num_channels = 3
height = 32
width = 32
image = floats_tensor((batch_size, num_channels, num_frames, height, width)).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 9, 32, 32)
@property
def output_shape(self):
return (3, 9, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = self.get_autoencoder_kl_cosmos_config()
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {
"CosmosEncoder3d",
"CosmosDecoder3d",
}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
@unittest.skip("Not sure why this test fails. Investigate later.")
def test_effective_gradient_checkpointing(self):
pass
@unittest.skip("Unsupported test.")
def test_forward_with_norm_groups(self):
pass
|
"""
Prompts from evaporate repo.
Full credits go to: https://github.com/HazyResearch/evaporate
"""
from llama_index.core.prompts import PromptTemplate
# deprecated, kept for backward compatibility
"""Pandas PromptTemplate. Convert query to python code.
Required template variables: `chunk`, `topic`.
Args:
template (str): Template for the PromptTemplate.
**prompt_kwargs: Keyword arguments for the PromptTemplate.
"""
SchemaIDPrompt = PromptTemplate
"""Function generation PromptTemplate. Generate a function from existing text.
Required template variables: `context_str`, `query_str`,
`attribute`, `function_field`.
Args:
template (str): Template for the PromptTemplate.
**prompt_kwargs: Keyword arguments for the PromptTemplate.
"""
FnGeneratePrompt = PromptTemplate
# used for schema identification
SCHEMA_ID_PROMPT_TMPL = """Sample text:
<tr class="mergedrow"><th scope="row" class="infobox-label"><div style="text-indent:-0.9em;margin-left:1.2em;font-weight:normal;">• <a href="/wiki/Monarchy_of_Canada" title="Monarchy of Canada">Monarch</a> </div></th><td class="infobox-data"><a href="/wiki/Charles_III" title="Charles III">Charles III</a></td></tr>
<tr class="mergedrow"><th scope="row" class="infobox-label"><div style="text-indent:-0.9em;margin-left:1.2em;font-weight:normal;">• <span class="nowrap"><a href="/wiki/Governor_General_of_Canada" title="Governor General of Canada">Governor General</a></span> </div></th><td class="infobox-data"><a href="/wiki/Mary_Simon" title="Mary Simon">Mary Simon</a></td></tr>
<b>Provinces and Territories</b class='navlinking countries'>
<ul>
<li>Saskatchewan</li>
<li>Manitoba</li>
<li>Ontario</li>
<li>Quebec</li>
<li>New Brunswick</li>
<li>Prince Edward Island</li>
<li>Nova Scotia</li>
<li>Newfoundland and Labrador</li>
<li>Yukon</li>
<li>Nunavut</li>
<li>Northwest Territories</li>
</ul>
Question: List all relevant attributes about 'Canada' that are exactly mentioned in this sample text if any.
Answer:
- Monarch: Charles III
- Governor General: Mary Simon
- Provinces and Territories: Saskatchewan, Manitoba, Ontario, Quebec, New Brunswick, Prince Edward Island, Nova Scotia, Newfoundland and Labrador, Yukon, Nunavut, Northwest Territories
----
Sample text:
Patient birth date: 1990-01-01
Prescribed medication: aspirin, ibuprofen, acetaminophen
Prescribed dosage: 1 tablet, 2 tablets, 3 tablets
Doctor's name: Dr. Burns
Date of discharge: 2020-01-01
Hospital address: 123 Main Street, New York, NY 10001
Question: List all relevant attributes about 'medications' that are exactly mentioned in this sample text if any.
Answer:
- Prescribed medication: aspirin, ibuprofen, acetaminophen
- Prescribed dosage: 1 tablet, 2 tablets, 3 tablets
----
Sample text:
{chunk:}
Question: List all relevant attributes about '{topic:}' that are exactly mentioned in this sample text if any.
Answer:"""
SCHEMA_ID_PROMPT = PromptTemplate(SCHEMA_ID_PROMPT_TMPL)
# used for function generation
FN_GENERATION_PROMPT_TMPL = """Here is a sample of text:
{context_str:}
Question: {query_str:}
Given the function signature, write Python code to extract the
"{attribute:}" field from the text.
Return the result as a single value (string, int, float), and not a list.
Make sure there is a return statement in the code. Do not leave out a return statement.
{expected_output_str:}
import re
def get_{function_field:}_field(text: str):
\"""
Function to extract the "{attribute:} field", and return the result
as a single value.
\"""
"""
FN_GENERATION_PROMPT = PromptTemplate(FN_GENERATION_PROMPT_TMPL)
FN_GENERATION_LIST_PROMPT_TMPL = """Here is a sample of text:
{context_str:}
Question: {query_str:}
Given the function signature, write Python code to extract the
"{attribute:}" field from the text.
Return the result as a list of values (if there is just one item, return a single \
element list).
Make sure there is a return statement in the code. Do not leave out a return statement.
{expected_output_str:}
import re
def get_{function_field:}_field(text: str) -> List:
\"""
Function to extract the "{attribute:} field", and return the result
as a single value.
\"""
"""
FN_GENERATION_LIST_PROMPT = PromptTemplate(FN_GENERATION_LIST_PROMPT_TMPL)
DEFAULT_EXPECTED_OUTPUT_PREFIX_TMPL = (
"Here is the expected output on the text after running the function. "
"Please do not write a function that would return a different output. "
"Expected output: "
)
DEFAULT_FIELD_EXTRACT_QUERY_TMPL = (
'Write a python function to extract the entire "{field}" field from text, '
"but not any other metadata. Return the result as a list."
)
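# A minimal usage sketch: rendering the function-generation prompt with toy,
# purely illustrative values.
_example_prompt = FN_GENERATION_PROMPT.format(
    context_str="Name: Ada Lovelace",
    query_str='Write a python function to extract the "name" field from text.',
    attribute="name",
    function_field="name",
    expected_output_str=DEFAULT_EXPECTED_OUTPUT_PREFIX_TMPL + "'Ada Lovelace'",
)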
|
"""Prompts from evaporate repo.
Full credits go to: https://github.com/HazyResearch/evaporate
"""
from llama_index.core.prompts import PromptTemplate
# deprecated, kept for backward compatibility
"""Pandas PromptTemplate. Convert query to python code.
Required template variables: `chunk`, `topic`.
Args:
template (str): Template for the PromptTemplate.
**prompt_kwargs: Keyword arguments for the PromptTemplate.
"""
SchemaIDPrompt = PromptTemplate
"""Function generation PromptTemplate. Generate a function from existing text.
Required template variables: `context_str`, `query_str`,
`attribute`, `function_field`.
Args:
template (str): Template for the PromptTemplate.
**prompt_kwargs: Keyword arguments for the PromptTemplate.
"""
FnGeneratePrompt = PromptTemplate
# used for schema identification
SCHEMA_ID_PROMPT_TMPL = """Sample text:
<tr class="mergedrow"><th scope="row" class="infobox-label"><div style="text-indent:-0.9em;margin-left:1.2em;font-weight:normal;">• <a href="/wiki/Monarchy_of_Canada" title="Monarchy of Canada">Monarch</a> </div></th><td class="infobox-data"><a href="/wiki/Charles_III" title="Charles III">Charles III</a></td></tr>
<tr class="mergedrow"><th scope="row" class="infobox-label"><div style="text-indent:-0.9em;margin-left:1.2em;font-weight:normal;">• <span class="nowrap"><a href="/wiki/Governor_General_of_Canada" title="Governor General of Canada">Governor General</a></span> </div></th><td class="infobox-data"><a href="/wiki/Mary_Simon" title="Mary Simon">Mary Simon</a></td></tr>
<b>Provinces and Territories</b class='navlinking countries'>
<ul>
<li>Saskatchewan</li>
<li>Manitoba</li>
<li>Ontario</li>
<li>Quebec</li>
<li>New Brunswick</li>
<li>Prince Edward Island</li>
<li>Nova Scotia</li>
<li>Newfoundland and Labrador</li>
<li>Yukon</li>
<li>Nunavut</li>
<li>Northwest Territories</li>
</ul>
Question: List all relevant attributes about 'Canada' that are exactly mentioned in this sample text if any.
Answer:
- Monarch: Charles III
- Governor General: Mary Simon
- Provinces and Territories: Saskatchewan, Manitoba, Ontario, Quebec, New Brunswick, Prince Edward Island, Nova Scotia, Newfoundland and Labrador, Yukon, Nunavut, Northwest Territories
----
Sample text:
Patient birth date: 1990-01-01
Prescribed medication: aspirin, ibuprofen, acetaminophen
Prescribed dosage: 1 tablet, 2 tablets, 3 tablets
Doctor's name: Dr. Burns
Date of discharge: 2020-01-01
Hospital address: 123 Main Street, New York, NY 10001
Question: List all relevant attributes about 'medications' that are exactly mentioned in this sample text if any.
Answer:
- Prescribed medication: aspirin, ibuprofen, acetaminophen
- Prescribed dosage: 1 tablet, 2 tablets, 3 tablets
----
Sample text:
{chunk:}
Question: List all relevant attributes about '{topic:}' that are exactly mentioned in this sample text if any.
Answer:"""
SCHEMA_ID_PROMPT = PromptTemplate(SCHEMA_ID_PROMPT_TMPL)
# used for function generation
FN_GENERATION_PROMPT_TMPL = """Here is a sample of text:
{context_str:}
Question: {query_str:}
Given the function signature, write Python code to extract the
"{attribute:}" field from the text.
Return the result as a single value (string, int, float), and not a list.
Make sure there is a return statement in the code. Do not leave out a return statement.
{expected_output_str:}
import re
def get_{function_field:}_field(text: str):
\"""
Function to extract the "{attribute:} field", and return the result
as a single value.
\"""
"""
FN_GENERATION_PROMPT = PromptTemplate(FN_GENERATION_PROMPT_TMPL)
FN_GENERATION_LIST_PROMPT_TMPL = """Here is a sample of text:
{context_str:}
Question: {query_str:}
Given the function signature, write Python code to extract the
"{attribute:}" field from the text.
Return the result as a list of values (if there is just one item, return a single \
element list).
Make sure there is a return statement in the code. Do not leave out a return statement.
{expected_output_str:}
import re
def get_{function_field:}_field(text: str) -> List:
\"""
Function to extract the "{attribute:} field", and return the result
as a single value.
\"""
"""
FN_GENERATION_LIST_PROMPT = PromptTemplate(FN_GENERATION_LIST_PROMPT_TMPL)
DEFAULT_EXPECTED_OUTPUT_PREFIX_TMPL = (
"Here is the expected output on the text after running the function. "
"Please do not write a function that would return a different output. "
"Expected output: "
)
DEFAULT_FIELD_EXTRACT_QUERY_TMPL = (
'Write a python function to extract the entire "{field}" field from text, '
"but not any other metadata. Return the result as a list."
)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogView3PlusTransformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogView3PlusTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogView3PlusTransformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
original_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
target_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
crop_coords = torch.tensor([0, 0]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"original_size": original_size,
"target_size": target_size,
"crop_coords": crop_coords,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"in_channels": 4,
"num_layers": 1,
"attention_head_dim": 4,
"num_attention_heads": 2,
"out_channels": 4,
"text_embed_dim": 8,
"time_embed_dim": 8,
"condition_dim": 2,
"pos_embed_max_size": 8,
"sample_size": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogView3PlusTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogView3PlusTransformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogView3PlusTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogView3PlusTransformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
original_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
target_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
crop_coords = torch.tensor([0, 0]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"original_size": original_size,
"target_size": target_size,
"crop_coords": crop_coords,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"in_channels": 4,
"num_layers": 1,
"attention_head_dim": 4,
"num_attention_heads": 2,
"out_channels": 4,
"text_embed_dim": 8,
"time_embed_dim": 8,
"condition_dim": 2,
"pos_embed_max_size": 8,
"sample_size": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
|
# Owner(s): ["module: unknown"]
import glob
import io
import os
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
try:
from third_party.build_bundled import create_bundled
except ImportError:
create_bundled = None
license_file = "third_party/LICENSES_BUNDLED.txt"
starting_txt = "The PyTorch repository and source distributions bundle"
site_packages = os.path.dirname(os.path.dirname(torch.__file__))
distinfo = glob.glob(os.path.join(site_packages, "torch-*dist-info"))
class TestLicense(TestCase):
@unittest.skipIf(not create_bundled, "can only be run in a source tree")
def test_license_for_wheel(self):
current = io.StringIO()
create_bundled("third_party", current)
with open(license_file) as fid:
src_tree = fid.read()
if not src_tree == current.getvalue():
raise AssertionError(
f'the contents of "{license_file}" do not '
"match the current state of the third_party files. Use "
'"python third_party/build_bundled.py" to regenerate it'
)
@unittest.skipIf(len(distinfo) == 0, "no installation in site-package to test")
def test_distinfo_license(self):
"""If run when pytorch is installed via a wheel, the license will be in
site-package/torch-*dist-info/LICENSE. Make sure it contains the third
party bundle of licenses"""
if len(distinfo) > 1:
raise AssertionError(
'Found too many "torch-*dist-info" directories '
f'in "{site_packages}, expected only one'
)
# setuptools renamed *dist-info/LICENSE to *dist-info/licenses/LICENSE since 77.0
license_file = os.path.join(distinfo[0], "licenses", "LICENSE")
if not os.path.exists(license_file):
license_file = os.path.join(distinfo[0], "LICENSE")
with open(license_file) as fid:
txt = fid.read()
self.assertTrue(starting_txt in txt)
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["module: unknown"]
import glob
import io
import os
import unittest
import torch
from torch.testing._internal.common_utils import run_tests, TestCase
try:
from third_party.build_bundled import create_bundled
except ImportError:
create_bundled = None
license_file = "third_party/LICENSES_BUNDLED.txt"
starting_txt = "The PyTorch repository and source distributions bundle"
site_packages = os.path.dirname(os.path.dirname(torch.__file__))
distinfo = glob.glob(os.path.join(site_packages, "torch-*dist-info"))
class TestLicense(TestCase):
@unittest.skipIf(not create_bundled, "can only be run in a source tree")
def test_license_for_wheel(self):
current = io.StringIO()
create_bundled("third_party", current)
with open(license_file) as fid:
src_tree = fid.read()
if not src_tree == current.getvalue():
raise AssertionError(
f'the contents of "{license_file}" do not '
"match the current state of the third_party files. Use "
'"python third_party/build_bundled.py" to regenerate it'
)
@unittest.skipIf(len(distinfo) == 0, "no installation in site-package to test")
def test_distinfo_license(self):
"""If run when pytorch is installed via a wheel, the license will be in
site-package/torch-*dist-info/LICENSE. Make sure it contains the third
party bundle of licenses"""
if len(distinfo) > 1:
raise AssertionError(
'Found too many "torch-*dist-info" directories '
f'in "{site_packages}, expected only one'
)
        # setuptools renamed *dist-info/LICENSE to *dist-info/licenses/LICENSE since 77.0
license_file = os.path.join(distinfo[0], "licenses", "LICENSE")
if not os.path.exists(license_file):
license_file = os.path.join(distinfo[0], "LICENSE")
with open(license_file) as fid:
txt = fid.read()
self.assertTrue(starting_txt in txt)
if __name__ == "__main__":
run_tests()
|
from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
"""Output parser for the structured chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
action_match = self.pattern.search(text)
if action_match is not None:
response = json.loads(action_match.group(1).strip(), strict=False)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
else:
return AgentAction(
response["action"], response.get("action_input", {}), text
)
else:
return AgentFinish({"output": text}, text)
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@property
def _type(self) -> str:
return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
"""Output parser with retries for the structured chat agent."""
base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
"""The base parser to use."""
output_fixing_parser: Optional[OutputFixingParser] = None
"""The output fixing parser to use."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish] = (
self.output_fixing_parser.parse(text)
)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
base_parser: Optional[StructuredChatOutputParser] = None,
) -> StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm, parser=base_parser
)
return cls(output_fixing_parser=output_fixing_parser)
elif base_parser is not None:
return cls(base_parser=base_parser)
else:
return cls()
@property
def _type(self) -> str:
return "structured_chat_with_retries"
|
from __future__ import annotations
import json
import logging
import re
from typing import Optional, Pattern, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
"""Output parser for the structured chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
action_match = self.pattern.search(text)
if action_match is not None:
response = json.loads(action_match.group(1).strip(), strict=False)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
else:
return AgentAction(
response["action"], response.get("action_input", {}), text
)
else:
return AgentFinish({"output": text}, text)
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@property
def _type(self) -> str:
return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
"""Output parser with retries for the structured chat agent."""
base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
"""The base parser to use."""
output_fixing_parser: Optional[OutputFixingParser] = None
"""The output fixing parser to use."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish] = (
self.output_fixing_parser.parse(text)
)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
raise OutputParserException(f"Could not parse LLM output: {text}") from e
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
base_parser: Optional[StructuredChatOutputParser] = None,
) -> StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm, parser=base_parser
)
return cls(output_fixing_parser=output_fixing_parser)
elif base_parser is not None:
return cls(base_parser=base_parser)
else:
return cls()
@property
def _type(self) -> str:
return "structured_chat_with_retries"
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, but it seems to have no effect: the variable must be exported manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
    # temporary fix for python 3.8 on macos where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.24.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or the OS X
    El Capitan default of 256; this is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, but it seems to have no effect: the variable must be exported manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
    # temporary fix for python 3.8 on macos where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.24.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or the OS X
    El Capitan default of 256; this is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
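# A hedged usage sketch, separate from the module above: JINA_MP_START_METHOD
# is read at import time, so it must be set before `jina` is first imported.
import os

os.environ['JINA_MP_START_METHOD'] = 'spawn'

import jina  # noqa: E402  (deliberately imported after the env tweak)

print(jina.__version__)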
|
import asyncio
import random
import pytest
from docarray import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
class RequestStreamerWrapper:
def __init__(self, num_requests, prefetch, iterate_sync_in_thread):
self.num_requests = num_requests
self.requests_handled = []
self.results_handled = []
self.request_ids = [random_identity() for _ in range(num_requests)]
self.response_ids = []
args = Namespace()
args.prefetch = prefetch
self.streamer = RequestStreamer(
request_handler=self.request_handler_fn,
result_handler=self.result_handle_fn,
end_of_iter_handler=self.end_of_iter_fn,
prefetch=getattr(args, 'prefetch', 0),
iterate_sync_in_thread=iterate_sync_in_thread
)
def request_handler_fn(self, request):
self.requests_handled.append(request)
async def task():
rand_sleep = random.uniform(0.1, 0.6)
await asyncio.sleep(rand_sleep)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(self, result):
self.results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn(self):
        # with a sync generator, all requests are consumed before all results are handled
assert len(self.requests_handled) == self.num_requests
assert len(self.results_handled) <= self.num_requests
def _yield_data_request(self, i):
req = DataRequest()
req.header.request_id = self.request_ids[i]
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
async def _get_async_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
await asyncio.sleep(0.1)
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
@pytest.mark.parametrize('results_in_order', [False, True])
@pytest.mark.parametrize('iterate_sync_in_thread', [False, True])
async def test_request_streamer(
prefetch, num_requests, async_iterator, results_in_order, iterate_sync_in_thread
):
test_streamer = RequestStreamerWrapper(num_requests, prefetch, iterate_sync_in_thread)
streamer = test_streamer.streamer
it = (
test_streamer._get_async_requests_iterator()
if async_iterator
else test_streamer._get_sync_requests_iterator()
)
response = streamer.stream(request_iterator=it, results_in_order=results_in_order)
num_responses = 0
async for r in response:
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
if results_in_order:
for req_id, resp_id in zip(
test_streamer.request_ids, test_streamer.response_ids
):
assert req_id == resp_id
@pytest.mark.asyncio
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('iterate_sync_in_thread', [False, True])
async def test_request_streamer_process_single_data(monkeypatch, num_requests, iterate_sync_in_thread):
test_streamer = RequestStreamerWrapper(num_requests, 0, iterate_sync_in_thread)
streamer = test_streamer.streamer
def end_of_iter_fn():
# bypass some assertions in RequestStreamerWrapper.end_of_iter_fn
pass
monkeypatch.setattr(streamer, '_end_of_iter_handler', end_of_iter_fn)
it = test_streamer._get_sync_requests_iterator()
num_responses = 0
for req in it:
r = await streamer.process_single_data(request=req)
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
|
import asyncio
import random
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
class RequestStreamerWrapper:
def __init__(self, num_requests, prefetch, iterate_sync_in_thread):
self.num_requests = num_requests
self.requests_handled = []
self.results_handled = []
self.request_ids = [random_identity() for _ in range(num_requests)]
self.response_ids = []
args = Namespace()
args.prefetch = prefetch
self.streamer = RequestStreamer(
request_handler=self.request_handler_fn,
result_handler=self.result_handle_fn,
end_of_iter_handler=self.end_of_iter_fn,
prefetch=getattr(args, 'prefetch', 0),
iterate_sync_in_thread=iterate_sync_in_thread
)
def request_handler_fn(self, request):
self.requests_handled.append(request)
async def task():
rand_sleep = random.uniform(0.1, 0.6)
await asyncio.sleep(rand_sleep)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(self, result):
self.results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn(self):
        # with a sync generator, all requests are consumed before all results are handled
assert len(self.requests_handled) == self.num_requests
assert len(self.results_handled) <= self.num_requests
def _yield_data_request(self, i):
req = DataRequest()
req.header.request_id = self.request_ids[i]
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
async def _get_async_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
await asyncio.sleep(0.1)
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
@pytest.mark.parametrize('results_in_order', [False, True])
@pytest.mark.parametrize('iterate_sync_in_thread', [False, True])
async def test_request_streamer(
prefetch, num_requests, async_iterator, results_in_order, iterate_sync_in_thread
):
test_streamer = RequestStreamerWrapper(num_requests, prefetch, iterate_sync_in_thread)
streamer = test_streamer.streamer
it = (
test_streamer._get_async_requests_iterator()
if async_iterator
else test_streamer._get_sync_requests_iterator()
)
response = streamer.stream(request_iterator=it, results_in_order=results_in_order)
num_responses = 0
async for r in response:
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
if results_in_order:
for req_id, resp_id in zip(
test_streamer.request_ids, test_streamer.response_ids
):
assert req_id == resp_id
@pytest.mark.asyncio
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('iterate_sync_in_thread', [False, True])
async def test_request_streamer_process_single_data(monkeypatch, num_requests, iterate_sync_in_thread):
test_streamer = RequestStreamerWrapper(num_requests, 0, iterate_sync_in_thread)
streamer = test_streamer.streamer
def end_of_iter_fn():
# bypass some assertions in RequestStreamerWrapper.end_of_iter_fn
pass
monkeypatch.setattr(streamer, '_end_of_iter_handler', end_of_iter_fn)
it = test_streamer._get_sync_requests_iterator()
num_responses = 0
for req in it:
r = await streamer.process_single_data(request=req)
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
|
import pytest
from langchain.evaluation.string_distance import (
PairwiseStringDistanceEvalChain,
StringDistance,
StringDistanceEvalChain,
)
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
def test_zero_distance(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = eval_chain.evaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
@pytest.mark.parametrize("normalize_score", [True, False])
def test_zero_distance_pairwise(
*,
distance: StringDistance,
normalize_score: bool,
) -> None:
eval_chain = PairwiseStringDistanceEvalChain(
distance=distance,
normalize_score=normalize_score,
)
string = "三人行则必有我师"
result = eval_chain.evaluate_string_pairs(prediction=string, prediction_b=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_string_pairs(
prediction=string,
prediction_b=string,
)
assert "score" in result
assert result["score"] == 0
valid_distances = [
distance for distance in StringDistance if distance != StringDistance.HAMMING
]
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
@pytest.mark.parametrize("normalize_score", [True, False])
def test_non_zero_distance(*, distance: StringDistance, normalize_score: bool) -> None:
eval_chain = StringDistanceEvalChain(
distance=distance,
normalize_score=normalize_score,
)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_strings(prediction=prediction, reference=reference)
assert "score" in result
assert result["score"] > 0
if normalize_score:
assert result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_strings(
prediction=prediction,
reference=reference,
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
def test_non_zero_distance_pairwise(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_string_pairs(
prediction=prediction,
prediction_b=reference,
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_string_pairs(
prediction=prediction,
prediction_b=reference,
)
assert "score" in result
assert 0 < result["score"] < 1.0
|
import pytest
from langchain.evaluation.string_distance import (
PairwiseStringDistanceEvalChain,
StringDistance,
StringDistanceEvalChain,
)
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
def test_zero_distance(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = eval_chain.evaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_strings(prediction=string, reference=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
@pytest.mark.parametrize("normalize_score", [True, False])
def test_zero_distance_pairwise(
*, distance: StringDistance, normalize_score: bool
) -> None:
eval_chain = PairwiseStringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
string = "三人行则必有我师"
result = eval_chain.evaluate_string_pairs(prediction=string, prediction_b=string)
assert "score" in result
assert result["score"] == 0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", list(StringDistance))
async def test_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
string = "三人行则必有我师"
result = await eval_chain.aevaluate_string_pairs(
prediction=string, prediction_b=string
)
assert "score" in result
assert result["score"] == 0
valid_distances = [
distance for distance in StringDistance if distance != StringDistance.HAMMING
]
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
@pytest.mark.parametrize("normalize_score", [True, False])
def test_non_zero_distance(*, distance: StringDistance, normalize_score: bool) -> None:
eval_chain = StringDistanceEvalChain(
distance=distance, normalize_score=normalize_score
)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_strings(prediction=prediction, reference=reference)
assert "score" in result
assert result["score"] > 0
if normalize_score:
assert result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_async(distance: StringDistance) -> None:
eval_chain = StringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_strings(
prediction=prediction, reference=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
def test_non_zero_distance_pairwise(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = eval_chain.evaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
@pytest.mark.requires("rapidfuzz")
@pytest.mark.parametrize("distance", valid_distances)
async def test_non_zero_distance_pairwise_async(distance: StringDistance) -> None:
eval_chain = PairwiseStringDistanceEvalChain(distance=distance)
prediction = "I like to eat apples."
reference = "I like apples."
result = await eval_chain.aevaluate_string_pairs(
prediction=prediction, prediction_b=reference
)
assert "score" in result
assert 0 < result["score"] < 1.0
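# A small sketch of the metric these chains wrap, assuming `rapidfuzz` is
# installed (the same optional dependency the tests above require); the exact
# normalization inside the chain may differ slightly.
from rapidfuzz.distance import Levenshtein

prediction = "I like to eat apples."
reference = "I like apples."

# Raw edit distance: the inserted "to eat " gives a distance of 7.
assert Levenshtein.distance(prediction, reference) == 7
# Normalized to [0, 1], roughly what `normalize_score=True` corresponds to.
assert 0 < Levenshtein.normalized_distance(prediction, reference) < 1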
|
from typing import Iterator, List, Optional
from langchain_core.documents import Document
from pydantic import SecretStr
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearchLoader(BaseLoader):
"""Load with `Brave Search` engine."""
def __init__(self, query: str, api_key: str, search_kwargs: Optional[dict] = None):
"""Initializes the BraveLoader.
Args:
query: The query to search for.
api_key: The API key to use.
search_kwargs: The search kwargs to use.
"""
self.query = query
self.api_key = api_key
self.search_kwargs = search_kwargs or {}
def load(self) -> List[Document]:
brave_client = BraveSearchWrapper(
api_key=SecretStr(self.api_key),
search_kwargs=self.search_kwargs,
)
return brave_client.download_documents(self.query)
def lazy_load(self) -> Iterator[Document]:
for doc in self.load():
yield doc
|
from typing import Iterator, List, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearchLoader(BaseLoader):
"""Load with `Brave Search` engine."""
def __init__(self, query: str, api_key: str, search_kwargs: Optional[dict] = None):
"""Initializes the BraveLoader.
Args:
query: The query to search for.
api_key: The API key to use.
search_kwargs: The search kwargs to use.
"""
self.query = query
self.api_key = api_key
self.search_kwargs = search_kwargs or {}
def load(self) -> List[Document]:
brave_client = BraveSearchWrapper(
api_key=self.api_key,
search_kwargs=self.search_kwargs,
)
return brave_client.download_documents(self.query)
def lazy_load(self) -> Iterator[Document]:
for doc in self.load():
yield doc
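# A hypothetical usage sketch; the API key and the `count` kwarg are
# placeholders, not verified against the Brave Search API.
if __name__ == "__main__":
    loader = BraveSearchLoader(
        query="obama middle name",
        api_key="BSA-placeholder-key",
        search_kwargs={"count": 3},  # assumption: limits number of results
    )
    for doc in loader.lazy_load():
        print(doc.metadata.get("title"), "->", doc.page_content[:80])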
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
zero_init_residual=False,
norm_cfg=norm_cfg,
init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg)))
optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.))
max_epochs = 73
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[65, 71],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
zero_init_residual=False,
norm_cfg=norm_cfg,
init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg)))
optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.))
max_epochs = 73
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[65, 71],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
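# A rough PyTorch sketch of what `norm_cfg = dict(type='GN', num_groups=32,
# requires_grad=True)` expands to: every norm layer built from this config is
# a 32-group GroupNorm with trainable affine parameters.
import torch
from torch import nn

gn = nn.GroupNorm(num_groups=32, num_channels=256, affine=True)
x = torch.randn(2, 256, 32, 32)
assert gn(x).shape == x.shape  # GroupNorm preserves the input shape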
|
"""Xgboost pyspark integration submodule for params."""
from typing import Dict
# pylint: disable=too-few-public-methods
from pyspark.ml.param import TypeConverters
from pyspark.ml.param.shared import Param, Params
class HasArbitraryParamsDict(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the **kwargs parts of the XGBoost
input.
"""
arbitrary_params_dict: "Param[Dict]" = Param(
Params._dummy(),
"arbitrary_params_dict",
"arbitrary_params_dict This parameter holds all of the additional parameters which are "
"not exposed as the XGBoost Spark estimator params but can be recognized by "
"underlying XGBoost library. It is stored as a dictionary.",
)
class HasBaseMarginCol(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
    and holds the variable to store the base margin column part of XGBoost.
"""
base_margin_col = Param(
Params._dummy(),
"base_margin_col",
"This stores the name for the column of the base margin",
typeConverter=TypeConverters.toString,
)
class HasFeaturesCols(Params):
"""
Mixin for param features_cols: a list of feature column names.
    This parameter takes effect only when use_gpu is enabled.
"""
features_cols = Param(
Params._dummy(),
"features_cols",
"feature column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(features_cols=[])
class HasEnableSparseDataOptim(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the boolean config of enabling sparse data optimization.
"""
enable_sparse_data_optim = Param(
Params._dummy(),
"enable_sparse_data_optim",
"This stores the boolean config of enabling sparse data optimization, if enabled, "
"Xgboost DMatrix object will be constructed from sparse matrix instead of "
"dense matrix. This config is disabled by default. If most of examples in your "
"training dataset contains sparse features, we suggest to enable this config.",
typeConverter=TypeConverters.toBoolean,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(enable_sparse_data_optim=False)
class HasQueryIdCol(Params):
"""
Mixin for param qid_col: query id column name.
"""
qid_col = Param(
Params._dummy(),
"qid_col",
"query id column name",
typeConverter=TypeConverters.toString,
)
class HasContribPredictionCol(Params):
"""
Mixin for param pred_contrib_col: contribution prediction column name.
    Output is a 3-dim array of shape (rows, groups, columns + 1) in the classification case;
    for regression it is 2-dimensional.
"""
pred_contrib_col: "Param[str]" = Param(
Params._dummy(),
"pred_contrib_col",
"feature contributions to individual predictions.",
typeConverter=TypeConverters.toString,
)
|
"""Xgboost pyspark integration submodule for params."""
from typing import Dict
# pylint: disable=too-few-public-methods
from pyspark.ml.param import TypeConverters
from pyspark.ml.param.shared import Param, Params
class HasArbitraryParamsDict(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the **kwargs parts of the XGBoost
input.
"""
arbitrary_params_dict: "Param[Dict]" = Param(
Params._dummy(),
"arbitrary_params_dict",
"arbitrary_params_dict This parameter holds all of the additional parameters which are "
"not exposed as the the XGBoost Spark estimator params but can be recognized by "
"underlying XGBoost library. It is stored as a dictionary.",
)
class HasBaseMarginCol(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
    and holds the variable to store the base margin column part of XGBoost.
"""
base_margin_col = Param(
Params._dummy(),
"base_margin_col",
"This stores the name for the column of the base margin",
typeConverter=TypeConverters.toString,
)
class HasFeaturesCols(Params):
"""
Mixin for param features_cols: a list of feature column names.
    This parameter takes effect only when use_gpu is enabled.
"""
features_cols = Param(
Params._dummy(),
"features_cols",
"feature column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(features_cols=[])
class HasEnableSparseDataOptim(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the boolean config of enabling sparse data optimization.
"""
enable_sparse_data_optim = Param(
Params._dummy(),
"enable_sparse_data_optim",
"This stores the boolean config of enabling sparse data optimization, if enabled, "
"Xgboost DMatrix object will be constructed from sparse matrix instead of "
"dense matrix. This config is disabled by default. If most of examples in your "
"training dataset contains sparse features, we suggest to enable this config.",
typeConverter=TypeConverters.toBoolean,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(enable_sparse_data_optim=False)
class HasQueryIdCol(Params):
"""
Mixin for param qid_col: query id column name.
"""
qid_col = Param(
Params._dummy(),
"qid_col",
"query id column name",
typeConverter=TypeConverters.toString,
)
class HasContribPredictionCol(Params):
"""
Mixin for param pred_contrib_col: contribution prediction column name.
    Output is a 3-dim array of shape (rows, groups, columns + 1) in the classification case;
    for regression it is 2-dimensional.
"""
pred_contrib_col: "Param[str]" = Param(
Params._dummy(),
"pred_contrib_col",
"feature contributions to individual predictions.",
typeConverter=TypeConverters.toString,
)
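# A minimal sketch (hypothetical class, not part of xgboost) showing how these
# mixins are consumed: defaults set in __init__ are read back through the
# standard pyspark `getOrDefault` accessor, with no SparkContext required.
class _DemoParams(HasFeaturesCols, HasEnableSparseDataOptim):
    pass


_demo = _DemoParams()
assert _demo.getOrDefault(_demo.features_cols) == []
assert _demo.getOrDefault(_demo.enable_sparse_data_optim) is False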
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ELU")
class ELU(Layer):
"""Applies an Exponential Linear Unit function to an output.
Formula:
```
f(x) = alpha * (exp(x) - 1.) for x < 0
f(x) = x for x >= 0
```
Args:
alpha: float, slope of negative section. Defaults to `1.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(self, alpha=1.0, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.supports_masking = True
self._build_at_init()
def call(self, inputs):
return activations.elu(inputs, alpha=self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ELU")
class ELU(Layer):
"""Applies an Exponential Linear Unit function to an output.
Formula:
```
f(x) = alpha * (exp(x) - 1.) for x < 0
f(x) = x for x >= 0
```
Args:
alpha: float, slope of negative section. Defaults to `1.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(self, alpha=1.0, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.supports_masking = True
self.built = True
def call(self, inputs):
return activations.elu(inputs, alpha=self.alpha)
def compute_output_shape(self, input_shape):
return input_shape
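# A quick numeric check of the formula in the docstring with the default
# alpha=1.0; backend-independent, plain numpy.
import numpy as np


def _elu_ref(x, alpha=1.0):
    return np.where(x >= 0, x, alpha * (np.exp(x) - 1.0))


out = _elu_ref(np.array([-2.0, -1.0, 0.0, 3.0]))
# exp(-2) - 1 ~= -0.8647 and exp(-1) - 1 ~= -0.6321; positives pass through.
assert np.allclose(out, [-0.86466472, -0.63212056, 0.0, 3.0])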
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5),
rpn_proposal=dict(nms_post=1000, max_per_img=300),
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5),
rpn_proposal=dict(nms_post=1000, max_per_img=300),
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(type='RandomSampler', num=256))),
test_cfg=dict(
rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for pad."""
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_pad_tests(options):
"""Make a set of tests to do pad."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 5D:
{
"dtype": [tf.int32, tf.int64, tf.float32, tf.bool],
"input_shape": [[1, 1, 2, 1, 1], [2, 1, 1, 1, 1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [
[[0, 0], [0, 1], [2, 3], [0, 0], [1, 0]],
[[0, 1], [0, 0], [0, 0], [2, 3], [1, 0]],
],
"constant_paddings": [True, False],
"fully_quantize": [False],
"quant_16x8": [False],
},
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32, tf.bool],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [
[[0, 0], [0, 1], [2, 3], [0, 0]],
[[0, 1], [0, 0], [0, 0], [2, 3]],
],
"constant_paddings": [True, False],
"fully_quantize": [False],
"quant_16x8": [False],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32, tf.bool],
"input_shape": [[1, 2]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
"fully_quantize": [False],
"quant_16x8": [False],
},
# 1D:
{
"dtype": [tf.int32, tf.bool],
"input_shape": [[1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [[[1, 2]]],
"constant_paddings": [False],
"fully_quantize": [False],
"quant_16x8": [False],
},
# 4D:
{
"dtype": [tf.float32, tf.bool],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [
[[0, 0], [0, 1], [2, 3], [0, 0]],
[[0, 1], [0, 0], [0, 0], [2, 3]],
[[0, 0], [0, 0], [0, 0], [0, 0]],
],
"constant_paddings": [True],
"fully_quantize": [True],
"quant_16x8": [False, True],
},
# 2D:
{
"dtype": [tf.float32, tf.bool],
"input_shape": [[1, 2]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True],
"fully_quantize": [True],
"quant_16x8": [False, True],
},
# 1D:
{
"dtype": [tf.float32, tf.bool],
"input_shape": [[1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [[[1, 2]]],
"constant_paddings": [True],
"fully_quantize": [True],
"quant_16x8": [False, True],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.compat.v1.placeholder(
dtype=parameters["padding_dtype"], name="padding", shape=shape
)
input_tensors = [input_tensor, paddings]
out = tf.pad(tensor=input_tensor, paddings=paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for pad op."""
values = [
create_tensor_data(
parameters["dtype"],
parameters["input_shape"],
min_value=-1,
max_value=1)
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for pad."""
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_pad_tests(options):
"""Make a set of tests to do pad."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 5D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1, 1], [2, 1, 1, 1, 1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [
[[0, 0], [0, 1], [2, 3], [0, 0], [1, 0]],
[[0, 1], [0, 0], [0, 0], [2, 3], [1, 0]],
],
"constant_paddings": [True, False],
"fully_quantize": [False],
"quant_16x8": [False],
},
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [
[[0, 0], [0, 1], [2, 3], [0, 0]],
[[0, 1], [0, 0], [0, 0], [2, 3]],
],
"constant_paddings": [True, False],
"fully_quantize": [False],
"quant_16x8": [False],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
"fully_quantize": [False],
"quant_16x8": [False],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [[[1, 2]]],
"constant_paddings": [False],
"fully_quantize": [False],
"quant_16x8": [False],
},
# 4D:
{
"dtype": [tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [
[[0, 0], [0, 1], [2, 3], [0, 0]],
[[0, 1], [0, 0], [0, 0], [2, 3]],
[[0, 0], [0, 0], [0, 0], [0, 0]],
],
"constant_paddings": [True],
"fully_quantize": [True],
"quant_16x8": [False, True],
},
# 2D:
{
"dtype": [tf.float32],
"input_shape": [[1, 2]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True],
"fully_quantize": [True],
"quant_16x8": [False, True],
},
# 1D:
{
"dtype": [tf.float32],
"input_shape": [[1]],
"padding_dtype": [tf.int32, tf.int64],
"paddings": [[[1, 2]]],
"constant_paddings": [True],
"fully_quantize": [True],
"quant_16x8": [False, True],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.compat.v1.placeholder(
dtype=parameters["padding_dtype"], name="padding", shape=shape
)
input_tensors = [input_tensor, paddings]
out = tf.pad(tensor=input_tensor, paddings=paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for pad op."""
values = [
create_tensor_data(
parameters["dtype"],
parameters["input_shape"],
min_value=-1,
max_value=1)
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
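# Shape arithmetic behind the test cases above: each [before, after] pair adds
# to one dimension, so a [1, 2] input padded with [[0, 1], [2, 3]] becomes
# shape (1 + 0 + 1, 2 + 2 + 3) = (2, 7). np.pad mirrors tf.pad's constant mode.
import numpy as np

demo = np.ones((1, 2), dtype=np.int32)
padded = np.pad(demo, [[0, 1], [2, 3]])  # zero padding by default
assert padded.shape == (2, 7)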
|
"""
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/external_memory>` for more details.
"""
import os
import tempfile
from typing import Callable, List, Tuple
import numpy as np
from sklearn.datasets import make_regression
import xgboost
def make_batches(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
tmpdir: str,
) -> List[Tuple[str, str]]:
files: List[Tuple[str, str]] = []
rng = np.random.RandomState(1994)
for i in range(n_batches):
X, y = make_regression(n_samples_per_batch, n_features, random_state=rng)
X_path = os.path.join(tmpdir, "X-" + str(i) + ".npy")
y_path = os.path.join(tmpdir, "y-" + str(i) + ".npy")
np.save(X_path, X)
np.save(y_path, y)
files.append((X_path, y_path))
return files
class Iterator(xgboost.DataIter):
"""A custom iterator for loading files in batches."""
def __init__(self, file_paths: List[Tuple[str, str]]):
self._file_paths = file_paths
self._it = 0
        # XGBoost will generate some cache files under the current directory with the
        # prefix "cache"
super().__init__(cache_prefix=os.path.join(".", "cache"))
def load_file(self) -> Tuple[np.ndarray, np.ndarray]:
X_path, y_path = self._file_paths[self._it]
X = np.load(X_path)
y = np.load(y_path)
assert X.shape[0] == y.shape[0]
return X, y
def next(self, input_data: Callable) -> int:
"""Advance the iterator by 1 step and pass the data to XGBoost. This function is
called by XGBoost during the construction of ``DMatrix``
"""
if self._it == len(self._file_paths):
# return 0 to let XGBoost know this is the end of iteration
return 0
        # input_data is a function passed in by XGBoost that has a similar signature to
        # the ``DMatrix`` constructor.
X, y = self.load_file()
input_data(data=X, label=y)
self._it += 1
return 1
def reset(self) -> None:
"""Reset the iterator to its beginning"""
self._it = 0
def main(tmpdir: str) -> xgboost.Booster:
# generate some random data for demo
files = make_batches(1024, 17, 31, tmpdir)
it = Iterator(files)
# For non-data arguments, specify it here once instead of passing them by the `next`
# method.
missing = np.nan
Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)
# ``approx`` is also supported, but less efficient due to sketching. GPU behaves
# differently than CPU tree methods as it uses a hybrid approach. See tutorial in
# doc for details.
booster = xgboost.train(
{"tree_method": "hist", "max_depth": 4},
Xy,
evals=[(Xy, "Train")],
num_boost_round=10,
)
return booster
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmpdir:
main(tmpdir)
|
"""
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/external_memory>` for more details.
"""
import os
import tempfile
from typing import Callable, List, Tuple
import numpy as np
from sklearn.datasets import make_regression
import xgboost
def make_batches(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
tmpdir: str,
) -> List[Tuple[str, str]]:
files: List[Tuple[str, str]] = []
rng = np.random.RandomState(1994)
for i in range(n_batches):
X, y = make_regression(n_samples_per_batch, n_features, random_state=rng)
X_path = os.path.join(tmpdir, "X-" + str(i) + ".npy")
y_path = os.path.join(tmpdir, "y-" + str(i) + ".npy")
np.save(X_path, X)
np.save(y_path, y)
files.append((X_path, y_path))
return files
class Iterator(xgboost.DataIter):
"""A custom iterator for loading files in batches."""
def __init__(self, file_paths: List[Tuple[str, str]]):
self._file_paths = file_paths
self._it = 0
        # XGBoost will generate some cache files under the current directory with the
        # prefix "cache"
super().__init__(cache_prefix=os.path.join(".", "cache"))
def load_file(self) -> Tuple[np.ndarray, np.ndarray]:
X_path, y_path = self._file_paths[self._it]
X = np.load(X_path)
y = np.load(y_path)
assert X.shape[0] == y.shape[0]
return X, y
def next(self, input_data: Callable) -> int:
"""Advance the iterator by 1 step and pass the data to XGBoost. This function is
called by XGBoost during the construction of ``DMatrix``
"""
if self._it == len(self._file_paths):
# return 0 to let XGBoost know this is the end of iteration
return 0
        # input_data is a function passed in by XGBoost that has a similar signature to
        # the ``DMatrix`` constructor.
X, y = self.load_file()
input_data(data=X, label=y)
self._it += 1
return 1
def reset(self) -> None:
"""Reset the iterator to its beginning"""
self._it = 0
def main(tmpdir: str) -> xgboost.Booster:
# generate some random data for demo
files = make_batches(1024, 17, 31, tmpdir)
it = Iterator(files)
# For non-data arguments, specify it here once instead of passing them by the `next`
# method.
missing = np.NaN
Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)
# ``approx`` is also supported, but less efficient due to sketching. GPU behaves
# differently than CPU tree methods as it uses a hybrid approach. See tutorial in
# doc for details.
booster = xgboost.train(
{"tree_method": "hist", "max_depth": 4},
Xy,
evals=[(Xy, "Train")],
num_boost_round=10,
)
return booster
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmpdir:
main(tmpdir)
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[[
dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales
], [
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
], [dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape',
'img_shape', 'scale_factor', 'flip',
'flip_direction'))
]])
]
|
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
img_scales = [(1333, 800), (666, 400), (2000, 1200)]
tta_pipeline = [
dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[[
dict(type='Resize', scale=s, keep_ratio=True) for s in img_scales
], [
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
], [dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape',
'img_shape', 'scale_factor', 'flip',
'flip_direction'))
]])
]
|
# Copyright (c) OpenMMLab. All rights reserved.
# This file adds snake case aliases for the coco api
import warnings
from collections import defaultdict
from typing import List, Optional, Union
import pycocotools
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
"""This class is almost the same as official pycocotools package.
It implements some snake case function aliases. So that the COCO class has
the same interface as LVIS class.
"""
def __init__(self, annotation_file=None):
if getattr(pycocotools, '__version__', '0') >= '12.0.2':
warnings.warn(
'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', # noqa: E501
UserWarning)
super().__init__(annotation_file=annotation_file)
self.img_ann_map = self.imgToAnns
self.cat_img_map = self.catToImgs
def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)
def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
return self.getCatIds(cat_names, sup_names, cat_ids)
def get_img_ids(self, img_ids=[], cat_ids=[]):
return self.getImgIds(img_ids, cat_ids)
def load_anns(self, ids):
return self.loadAnns(ids)
def load_cats(self, ids):
return self.loadCats(ids)
def load_imgs(self, ids):
return self.loadImgs(ids)
# just for the ease of import
COCOeval = _COCOeval
class COCOPanoptic(COCO):
"""This wrapper is for loading the panoptic style annotation file.
The format is shown in the CocoPanopticDataset class.
Args:
annotation_file (str, optional): Path of annotation file.
Defaults to None.
"""
def __init__(self, annotation_file: Optional[str] = None) -> None:
super(COCOPanoptic, self).__init__(annotation_file)
def createIndex(self) -> None:
"""Create index."""
# create index
print('creating index...')
# anns stores 'segment_id -> annotation'
anns, cats, imgs = {}, {}, {}
img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
# to match with instance.json
seg_ann['image_id'] = ann['image_id']
img_to_anns[ann['image_id']].append(seg_ann)
                    # segment_id is not unique in the coco dataset;
                    # annotations from different images
                    # may have the same segment_id
if seg_ann['id'] in anns.keys():
anns[seg_ann['id']].append(seg_ann)
else:
anns[seg_ann['id']] = [seg_ann]
# filter out annotations from other images
img_to_anns_ = defaultdict(list)
for k, v in img_to_anns.items():
img_to_anns_[k] = [x for x in v if x['image_id'] == k]
img_to_anns = img_to_anns_
if 'images' in self.dataset:
for img_info in self.dataset['images']:
img_info['segm_file'] = img_info['file_name'].replace(
'jpg', 'png')
imgs[img_info['id']] = img_info
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])
print('index created!')
self.anns = anns
self.imgToAnns = img_to_anns
self.catToImgs = cat_to_imgs
self.imgs = imgs
self.cats = cats
def load_anns(self,
ids: Union[List[int], int] = []) -> Optional[List[dict]]:
"""Load anns with the specified ids.
        ``self.anns`` maps each segment id to a list of annotations
        rather than a single annotation.
Args:
ids (Union[List[int], int]): Integer ids specifying anns.
Returns:
anns (List[dict], optional): Loaded ann objects.
"""
anns = []
if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):
# self.anns is a list of annotation lists instead of
# a list of annotations
for id in ids:
anns += self.anns[id]
return anns
        elif isinstance(ids, int):
return self.anns[ids]
|
# Copyright (c) OpenMMLab. All rights reserved.
# This file adds snake case aliases for the coco api
import warnings
from collections import defaultdict
from typing import List, Optional, Union
import pycocotools
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
"""This class is almost the same as official pycocotools package.
It implements some snake case function aliases. So that the COCO class has
the same interface as LVIS class.
"""
def __init__(self, annotation_file=None):
if getattr(pycocotools, '__version__', '0') >= '12.0.2':
warnings.warn(
'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', # noqa: E501
UserWarning)
super().__init__(annotation_file=annotation_file)
self.img_ann_map = self.imgToAnns
self.cat_img_map = self.catToImgs
def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)
def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
return self.getCatIds(cat_names, sup_names, cat_ids)
def get_img_ids(self, img_ids=[], cat_ids=[]):
return self.getImgIds(img_ids, cat_ids)
def load_anns(self, ids):
return self.loadAnns(ids)
def load_cats(self, ids):
return self.loadCats(ids)
def load_imgs(self, ids):
return self.loadImgs(ids)
# just for the ease of import
COCOeval = _COCOeval
class COCOPanoptic(COCO):
"""This wrapper is for loading the panoptic style annotation file.
The format is shown in the CocoPanopticDataset class.
Args:
annotation_file (str, optional): Path of annotation file.
Defaults to None.
"""
def __init__(self, annotation_file: Optional[str] = None) -> None:
super(COCOPanoptic, self).__init__(annotation_file)
def createIndex(self) -> None:
"""Create index."""
# create index
print('creating index...')
# anns stores 'segment_id -> annotation'
anns, cats, imgs = {}, {}, {}
img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann, img_info in zip(self.dataset['annotations'],
self.dataset['images']):
img_info['segm_file'] = ann['file_name']
for seg_ann in ann['segments_info']:
# to match with instance.json
seg_ann['image_id'] = ann['image_id']
seg_ann['height'] = img_info['height']
seg_ann['width'] = img_info['width']
img_to_anns[ann['image_id']].append(seg_ann)
                    # segment_id is not unique in the coco dataset
if seg_ann['id'] in anns.keys():
anns[seg_ann['id']].append(seg_ann)
else:
anns[seg_ann['id']] = [seg_ann]
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
for seg_ann in ann['segments_info']:
cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])
print('index created!')
self.anns = anns
self.imgToAnns = img_to_anns
self.catToImgs = cat_to_imgs
self.imgs = imgs
self.cats = cats
def load_anns(self,
ids: Union[List[int], int] = []) -> Optional[List[dict]]:
"""Load anns with the specified ids.
        ``self.anns`` maps each segment id to a list of annotations
        rather than a single annotation.
Args:
ids (Union[List[int], int]): Integer ids specifying anns.
Returns:
anns (List[dict], optional): Loaded ann objects.
"""
anns = []
if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):
# self.anns is a list of annotation lists instead of
# a list of annotations
for id in ids:
anns += self.anns[id]
return anns
        elif isinstance(ids, int):
return self.anns[ids]
|
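A short usage sketch of the snake case aliases defined above; the annotation file path is an assumption for illustration.

# Hypothetical usage of the aliases (the path is illustrative).
coco = COCO('data/coco/annotations/instances_val2017.json')
img_ids = coco.get_img_ids()                     # alias for getImgIds()
ann_ids = coco.get_ann_ids(img_ids=img_ids[:1])  # alias for getAnnIds()
anns = coco.load_anns(ann_ids)                   # alias for loadAnns()
print(f'{len(anns)} annotations on the first image')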
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
|
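A minimal sketch of materializing a config like this with MMEngine; the file path is an assumption, and MODELS.build resolves the registered 'LAD' type (the teacher weights are then loaded from teacher_ckpt by the model itself).

from mmengine.config import Config
from mmdet.registry import MODELS

# The path is hypothetical; point it at wherever this config lives.
cfg = Config.fromfile('configs/lad/lad_r101-paa-r50_fpn_2xb8_coco_1x.py')
model = MODELS.build(cfg.model)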
from ._dsp import adsr_envelope, oscillator_bank
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"fftconvolve",
"oscillator_bank",
]
|
from ._dsp import oscillator_bank
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve
__all__ = [
"add_noise",
"barkscale_fbanks",
"convolve",
"fftconvolve",
"oscillator_bank",
]
|
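A hypothetical usage of oscillator_bank from the module above, assuming the torchaudio prototype API where frequencies and amplitudes have shape (..., time, n_oscillators) and the oscillators are sum-reduced into one waveform.

import torch
from torchaudio.prototype.functional import oscillator_bank

sample_rate = 8000
num_frames, num_osc = sample_rate, 2  # one second, two partials
freqs = torch.full((num_frames, num_osc), 440.0)
freqs[:, 1] = 880.0                   # second oscillator one octave up
amps = torch.full((num_frames, num_osc), 0.5)
waveform = oscillator_bank(freqs, amps, sample_rate=sample_rate)
print(waveform.shape)                 # torch.Size([8000])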
__version__ = '2023.01.18.alpha'
from docarray.array.array.array import DocumentArray
from docarray.base_document.document import BaseDocument
__all__ = [
'BaseDocument',
'DocumentArray',
]
|
__version__ = '2023.01.18.alpha'
from docarray.array.array import DocumentArray
from docarray.base_document.document import BaseDocument
__all__ = [
'BaseDocument',
'DocumentArray',
]
|
from jina.schemas.gateway import schema_gateway
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
_schema_flow_with = _cli_to_schema(
api_to_dict(),
['flow', 'gateway'],
allow_addition=False,
    description='The config of the Flow; unrecognized config arguments will be applied to all Deployments',
)['Jina::Flow']
schema_flow = {
'Jina::Flow': {
'properties': {
'with': _schema_flow_with,
'jtype': {
'description': 'The type of Jina object (Flow, Executor).\n'
'A Flow is made up of several sub-tasks, and it manages the states and context of these sub-tasks.\n'
'The input and output data of Flows are Documents.',
'type': 'string',
'default': 'Flow',
'enum': ['Flow'],
},
'version': {
'description': 'The YAML version of this Flow.',
'type': 'string',
'default': '\'1\'',
},
'executors': {
'description': 'Define the steps in the Flow.\n'
'A Deployment is a container and interface for one or multiple Pods that have the same properties.',
'type': 'array',
'items': {'$ref': '#/definitions/Jina::Deployment'},
'minItems': 1,
},
'gateway': schema_gateway['Jina::Gateway'],
},
'type': 'object',
'additionalProperties': False,
'required': ['jtype', 'executors'],
}
}
|
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
_schema_flow_with = _cli_to_schema(
api_to_dict(),
['flow', 'gateway'],
allow_addition=False,
    description='The config of the Flow; unrecognized config arguments will be applied to all Deployments',
)['Jina::Flow']
schema_flow = {
'Jina::Flow': {
'properties': {
'with': _schema_flow_with,
'jtype': {
'description': 'The type of Jina object (Flow, Executor).\n'
'A Flow is made up of several sub-tasks, and it manages the states and context of these sub-tasks.\n'
'The input and output data of Flows are Documents.',
'type': 'string',
'default': 'Flow',
'enum': ['Flow'],
},
'version': {
'description': 'The YAML version of this Flow.',
'type': 'string',
'default': '\'1\'',
},
'executors': {
'description': 'Define the steps in the Flow.\n'
'A Deployment is a container and interface for one or multiple Pods that have the same properties.',
'type': 'array',
'items': {'$ref': '#/definitions/Jina::Deployment'},
'minItems': 1,
},
},
'type': 'object',
'additionalProperties': False,
'required': ['jtype', 'executors'],
}
}
|
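A minimal Flow config, written as a Python dict, that satisfies this schema: 'jtype' and 'executors' are required, and 'executors' needs at least one item. The executor fields are illustrative.

flow_config = {
    'jtype': 'Flow',
    'version': '1',
    'executors': [
        # Hypothetical executor entry; the schema only requires one item.
        {'name': 'encoder', 'uses': 'MyExecutor'},
    ],
}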
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on='/index',
inputs=data_generator(),
request_size=request_size,
return_results=True,
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
def filter_none(elements):
return list(filter(lambda e: e is not None, elements))
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
assert (
len(
filter_none(
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
)
== count
)
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_path]},
return_results=True,
)
validate_traversal(docs_per_path)(resp)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on="/index", inputs=data_generator(), request_size=request_size, return_results=True
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
def filter_none(elements):
return list(filter(lambda e: e is not None, elements))
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
assert len(
filter_none(
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes("embedding")
)
) == count
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on="/test", inputs=docs, parameters={"traversal_paths": [traversal_path]}, return_results=True
)
validate_traversal(docs_per_path)(resp)
|
from typing import List, Optional
from docarray import BaseDoc, DocList
from jina import Executor, Flow, requests
class Nested2Doc(BaseDoc):
value: str
class Nested1Doc(BaseDoc):
nested: Nested2Doc
class RootDoc(BaseDoc):
nested: Optional[Nested1Doc]
num: Optional[int]
text: str
class OptionalNested1Doc(BaseDoc):
nested: Optional[Nested2Doc]
class RootDocWithNestedList(BaseDoc):
nested: Optional[List[OptionalNested1Doc]]
num: Optional[int]
text: str
class NestedSchemaExecutor(Executor):
@requests(on='/endpoint')
async def endpoint(self, docs: DocList[RootDoc], **kwargs) -> DocList[RootDoc]:
rets = DocList[RootDoc]()
rets.append(
RootDoc(
text='hello world', nested=Nested1Doc(nested=Nested2Doc(value='test'))
)
)
return rets
class ListNestedSchemaExecutor(Executor):
@requests(on='/endpoint')
async def endpoint(
self, docs: DocList[RootDocWithNestedList], **kwargs
) -> DocList[RootDocWithNestedList]:
rets = DocList[RootDocWithNestedList]()
rets.append(
RootDocWithNestedList(
text='hello world', nested=[Nested1Doc(nested=Nested2Doc(value='test'))]
)
)
return rets
def test_issue_6019():
flow = Flow().add(name='inference', needs='gateway', uses=NestedSchemaExecutor)
with flow:
res = flow.post(
on='/endpoint', inputs=RootDoc(text='hello'), return_type=DocList[RootDoc]
)
assert res[0].text == 'hello world'
assert res[0].nested.nested.value == 'test'
def test_issue_6019_with_nested_list():
flow = Flow().add(name='inference', needs='gateway', uses=ListNestedSchemaExecutor)
with flow:
res = flow.post(
on='/endpoint',
inputs=RootDocWithNestedList(text='hello'),
return_type=DocList[RootDocWithNestedList],
)
assert res[0].text == 'hello world'
assert res[0].nested[0].nested.value == 'test'
def test_issue_6084():
class EnvInfo(BaseDoc):
history: str = ''
class A(BaseDoc):
b: EnvInfo
class MyIssue6084Exec(Executor):
@requests
def foo(self, docs: DocList[A], **kwargs) -> DocList[A]:
pass
f = Flow().add(uses=MyIssue6084Exec).add(uses=MyIssue6084Exec)
with f:
pass
|
from typing import List, Optional
from docarray import BaseDoc, DocList
from jina import Executor, Flow, requests
class Nested2Doc(BaseDoc):
value: str
class Nested1Doc(BaseDoc):
nested: Nested2Doc
class RootDoc(BaseDoc):
nested: Optional[Nested1Doc]
num: Optional[int]
text: str
class OptionalNested1Doc(BaseDoc):
nested: Optional[Nested2Doc]
class RootDocWithNestedList(BaseDoc):
nested: Optional[List[OptionalNested1Doc]]
num: Optional[int]
text: str
class NestedSchemaExecutor(Executor):
@requests(on='/endpoint')
async def endpoint(self, docs: DocList[RootDoc], **kwargs) -> DocList[RootDoc]:
rets = DocList[RootDoc]()
rets.append(
RootDoc(
text='hello world', nested=Nested1Doc(nested=Nested2Doc(value='test'))
)
)
return rets
class ListNestedSchemaExecutor(Executor):
@requests(on='/endpoint')
async def endpoint(
self, docs: DocList[RootDocWithNestedList], **kwargs
) -> DocList[RootDocWithNestedList]:
rets = DocList[RootDocWithNestedList]()
rets.append(
RootDocWithNestedList(
text='hello world', nested=[Nested1Doc(nested=Nested2Doc(value='test'))]
)
)
return rets
def test_issue_6019():
flow = Flow().add(name='inference', needs='gateway', uses=NestedSchemaExecutor)
with flow:
res = flow.post(
on='/endpoint', inputs=RootDoc(text='hello'), return_type=DocList[RootDoc]
)
assert res[0].text == 'hello world'
assert res[0].nested.nested.value == 'test'
def test_issue_6019_with_nested_list():
flow = Flow().add(name='inference', needs='gateway', uses=ListNestedSchemaExecutor)
with flow:
res = flow.post(
on='/endpoint',
inputs=RootDocWithNestedList(text='hello'),
return_type=DocList[RootDocWithNestedList],
)
assert res[0].text == 'hello world'
assert res[0].nested[0].nested.value == 'test'
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import ATSSHead
class TestATSSHead(TestCase):
def test_atss_head_loss(self):
"""Tests atss head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
atss_head = ATSSHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
norm_cfg=None,
train_cfg=cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_centerness_loss.item(), 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_centerness_loss.item(), 0,
'centerness loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import ATSSHead
class TestATSSHead(TestCase):
def test_atss_head_loss(self):
"""Tests atss head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
atss_head = ATSSHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
norm_cfg=None,
train_cfg=cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_centerness_loss.item(), 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_centerness_loss.item(), 0,
'centerness loss should be non-zero')
|
"""
Remote file reader.
A loader that fetches any remote page or file by URL and retrieves child pages with certain constraints. The class also parses the contents of each page and provides access to the parsed data.
"""
from typing import Any, Dict, List, Optional, Union
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.remote import RemoteReader
class RemoteDepthReader(BaseReader):
def __init__(
self,
*args: Any,
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
depth: int = 1,
domain_lock: bool = False,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self.file_extractor = file_extractor
self.depth = depth
self.domain_lock = domain_lock
    def load_data(self, url: str) -> List[Document]:
        """Parse whatever is at the URL."""
        from tqdm.auto import tqdm
remote_reader = RemoteReader(file_extractor=self.file_extractor)
documents = []
links = self.get_links(url)
urls = {-1: [url]} # -1 is the starting point
links_visited = []
for i in range(self.depth + 1):
urls[i] = []
new_links = []
print(f"Reading links at depth {i}...")
for link in tqdm(links):
"""Checking if the link belongs the provided domain."""
if (self.domain_lock and link.find(url) > -1) or (not self.domain_lock):
print("Loading link: " + link)
if link in links_visited:
continue
if link:
urls[i].append(link)
new_links.extend(self.get_links(link))
links_visited.append(link)
else:
print("Link ignored: " + link)
new_links = list(set(new_links))
links = new_links
print(f"Found {len(urls)} links at depth {self.depth}.")
for depth_i in urls:
for url in urls[depth_i]:
try:
documents.extend(remote_reader.load_data(url))
except Exception as e:
print(f"Error reading {url} at depth {depth_i}: {e}")
continue
return documents
@staticmethod
def is_url(href) -> bool:
"""Check if a link is a URL."""
return href.startswith("http")
    def get_links(self, url) -> List[str]:
        """Get all links from a page."""
        from urllib.parse import urljoin, urlparse, urlunparse
        from bs4 import BeautifulSoup
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
links = soup.find_all("a")
result = []
for link in links:
if isinstance(link, str):
href = link
else:
href = link.get("href")
if href is not None:
if not self.is_url(href):
href = urljoin(url, href)
url_parsed = urlparse(href)
url_without_query_string = urlunparse(
(url_parsed.scheme, url_parsed.netloc, url_parsed.path, "", "", "")
)
if (
url_without_query_string not in result
and url_without_query_string
and url_without_query_string.startswith("http")
):
result.append(url_without_query_string)
return result
|
"""Remote file reader.
A loader that fetches any remote page or file by URL and retrieves child pages with certain constraints. The class also parses the contents of each page and provides access to the parsed data.
"""
from typing import Any, Dict, List, Optional, Union
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.remote import RemoteReader
class RemoteDepthReader(BaseReader):
def __init__(
self,
*args: Any,
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
depth: int = 1,
domain_lock: bool = False,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self.file_extractor = file_extractor
self.depth = depth
self.domain_lock = domain_lock
    def load_data(self, url: str) -> List[Document]:
        """Parse whatever is at the URL."""
        from tqdm.auto import tqdm
remote_reader = RemoteReader(file_extractor=self.file_extractor)
documents = []
links = self.get_links(url)
urls = {-1: [url]} # -1 is the starting point
links_visited = []
for i in range(self.depth + 1):
urls[i] = []
new_links = []
print(f"Reading links at depth {i}...")
for link in tqdm(links):
"""Checking if the link belongs the provided domain."""
if (self.domain_lock and link.find(url) > -1) or (not self.domain_lock):
print("Loading link: " + link)
if link in links_visited:
continue
if link:
urls[i].append(link)
new_links.extend(self.get_links(link))
links_visited.append(link)
else:
print("Link ignored: " + link)
new_links = list(set(new_links))
links = new_links
print(f"Found {len(urls)} links at depth {self.depth}.")
for depth_i in urls:
for url in urls[depth_i]:
try:
documents.extend(remote_reader.load_data(url))
except Exception as e:
print(f"Error reading {url} at depth {depth_i}: {e}")
continue
return documents
@staticmethod
def is_url(href) -> bool:
"""Check if a link is a URL."""
return href.startswith("http")
    def get_links(self, url) -> List[str]:
        """Get all links from a page."""
        from urllib.parse import urljoin, urlparse, urlunparse
        from bs4 import BeautifulSoup
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
links = soup.find_all("a")
result = []
for link in links:
if isinstance(link, str):
href = link
else:
href = link.get("href")
if href is not None:
if not self.is_url(href):
href = urljoin(url, href)
url_parsed = urlparse(href)
url_without_query_string = urlunparse(
(url_parsed.scheme, url_parsed.netloc, url_parsed.path, "", "", "")
)
if (
url_without_query_string not in result
and url_without_query_string
and url_without_query_string.startswith("http")
):
result.append(url_without_query_string)
return result
|
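A hypothetical usage sketch of the reader above; the URL is illustrative. With domain_lock=True, only links that contain the start URL are followed.

reader = RemoteDepthReader(depth=1, domain_lock=True)
docs = reader.load_data('https://example.com/docs/')  # crawl one level deep
print(f'loaded {len(docs)} documents')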
from .yolov5_segmenter import YoloV5Segmenter
|
from .yolov5_segmenter import YoloV5Segmenter
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/RUOD/'
class_name = ('holothurian', 'echinus', 'scallop', 'starfish', 'fish',
'corals', 'diver', 'cuttlefish', 'turtle', 'jellyfish')
palette = [(235, 211, 70), (106, 90, 205), (160, 32, 240), (176, 23, 31),
(142, 0, 0), (230, 0, 0), (106, 0, 228), (60, 100, 0), (80, 100, 0),
(70, 0, 0)]
metainfo = dict(classes=class_name, palette=palette)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7;
                    # follow the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities'))
]
train_dataloader = dict(
sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
_delete_=True,
type='CocoDataset',
data_root=data_root,
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=train_pipeline,
return_classes=True,
ann_file='RUOD_ANN/instances_train.json',
data_prefix=dict(img='RUOD_pic/train/')))
val_dataloader = dict(
dataset=dict(
metainfo=metainfo,
data_root=data_root,
return_classes=True,
ann_file='RUOD_ANN/instances_test.json',
data_prefix=dict(img='RUOD_pic/test/')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'RUOD_ANN/instances_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1)
}))
# learning policy
max_epochs = 12
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto'))
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
data_root = 'data/RUOD/'
class_name = ('holothurian', 'echinus', 'scallop', 'starfish', 'fish',
'corals', 'diver', 'cuttlefish', 'turtle', 'jellyfish')
palette = [(235, 211, 70), (106, 90, 205), (160, 32, 240), (176, 23, 31),
(142, 0, 0), (230, 0, 0), (106, 0, 228), (60, 100, 0), (80, 100, 0),
(70, 0, 0)]
metainfo = dict(classes=class_name, palette=palette)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7;
                    # follow the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities'))
]
train_dataloader = dict(
sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
_delete_=True,
type='CocoDataset',
data_root=data_root,
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=train_pipeline,
return_classes=True,
ann_file='RUOD_ANN/instances_train.json',
data_prefix=dict(img='RUOD_pic/train/')))
val_dataloader = dict(
dataset=dict(
metainfo=metainfo,
data_root=data_root,
return_classes=True,
ann_file='RUOD_ANN/instances_test.json',
data_prefix=dict(img='RUOD_pic/test/')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'RUOD_ANN/instances_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1)
}))
# learning policy
max_epochs = 12
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto'))
load_from = ''
|
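For intuition, the paramwise_cfg above makes the backbone train ten times slower than the rest of the network; a quick sketch of the multiplier arithmetic (not MMEngine internals):

base_lr = 1e-4               # optimizer lr above
backbone_lr = base_lr * 0.1  # 'backbone': dict(lr_mult=0.1)
print(backbone_lr)           # ~1e-5; absolute_pos_embed instead gets decay_mult=0.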
_base_ = '../gcnet/mask-rcnn_r50-syncbn-gcb-r4-c3-c5_fpn_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2)),
mask_roi_extractor=dict(
type='GenericRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2))))
|
_base_ = '../gcnet/mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2)),
mask_roi_extractor=dict(
type='GenericRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2))))
|
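Unlike the default single-level RoI extractor, GenericRoIExtractor with aggregation='sum' pools each RoI from every FPN level and sums the per-level results; a shape-level sketch in plain PyTorch (not the mmcv op):

import torch

num_rois, channels, out_size = 8, 256, 7  # output_size=7 for the bbox branch
# One pooled tensor per stride in featmap_strides=[4, 8, 16, 32].
per_level = [torch.randn(num_rois, channels, out_size, out_size)
             for _ in range(4)]
roi_feats = torch.stack(per_level).sum(dim=0)  # aggregation='sum'
print(roi_feats.shape)  # torch.Size([8, 256, 7, 7])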
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
            tuple: The result map of shape (H, W) and the labels of
                the kept instances.
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
            Tensor: The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
            tuple: The result map of shape (H, W) and the labels of
                the kept instances.
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
            Tensor: The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
|
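The panoptic id arithmetic in simple_test packs the class label and the instance index into a single integer; a small worked sketch, assuming mmdet's INSTANCE_OFFSET value of 1000:

INSTANCE_OFFSET = 1000  # assumed value, matching mmdet
cls, instance_id = 17, 3
segment_id = cls + instance_id * INSTANCE_OFFSET     # 3017
assert segment_id % INSTANCE_OFFSET == cls           # recover the class
assert segment_id // INSTANCE_OFFSET == instance_id  # recover the instance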