input | output
---|---
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.1.0.dev0",
author="Nils Reimers, Tom Aarsen",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.34.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy",
"scikit-learn",
"scipy",
"huggingface-hub>=0.15.1",
"Pillow",
],
extras_require={
"train": [
"datasets",
"accelerate>=0.20.3",
],
"dev": [
"datasets",
"accelerate>=0.20.3",
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import setup, find_packages
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="2.2.2",
author="Nils Reimers",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
python_requires=">=3.6.0",
install_requires=[
'transformers>=4.6.0,<5.0.0',
'tqdm',
'torch>=1.6.0',
'torchvision',
'numpy',
'scikit-learn',
'scipy',
'nltk',
'sentencepiece',
'huggingface-hub>=0.4.0'
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning"
)
|
"""Output classes.
**Output** classes are used to represent the output of a language model call
and the output of a chat.
The top container for information is the `LLMResult` object. `LLMResult` is used by
both chat models and LLMs. This object contains the output of the language
model and any additional information that the model provider wants to return.
When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
- Chat models will return `AIMessage` objects.
- LLMs will return regular text strings.
In addition, users can access the raw output of either LLMs or chat models via
callbacks. The on_chat_model_end and on_llm_end callbacks will return an
LLMResult object containing the generated outputs and any additional information
returned by the model provider.
In general, if information is already available
in the AIMessage object, it is recommended to access it from there rather than
from the `LLMResult` object.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.outputs.chat_generation import (
ChatGeneration,
ChatGenerationChunk,
)
from langchain_core.outputs.chat_result import ChatResult
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.llm_result import LLMResult
from langchain_core.outputs.run_info import RunInfo
__all__ = [
"ChatGeneration",
"ChatGenerationChunk",
"ChatResult",
"Generation",
"GenerationChunk",
"LLMResult",
"RunInfo",
]
_dynamic_imports = {
"ChatGeneration": "chat_generation",
"ChatGenerationChunk": "chat_generation",
"ChatResult": "chat_result",
"Generation": "generation",
"GenerationChunk": "generation",
"LLMResult": "llm_result",
"RunInfo": "run_info",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
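A minimal sketch of how the lazy-import hook above behaves at runtime; nothing below is part of the source, and `generations=[]` is just an illustrative argument:
from langchain_core import outputs

result = outputs.LLMResult(generations=[])  # first attribute access routes through __getattr__, which imports llm_result and caches LLMResult in globals()
assert "LLMResult" in dir(outputs)  # __dir__ simply reflects __all__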
|
"""Output classes.
**Output** classes are used to represent the output of a language model call
and the output of a chat.
The top container for information is the `LLMResult` object. `LLMResult` is used by
both chat models and LLMs. This object contains the output of the language
model and any additional information that the model provider wants to return.
When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
- Chat models will return `AIMessage` objects.
- LLMs will return regular text strings.
In addition, users can access the raw output of either LLMs or chat models via
callbacks. The on_chat_model_end and on_llm_end callbacks will return an
LLMResult object containing the generated outputs and any additional information
returned by the model provider.
In general, if information is already available
in the AIMessage object, it is recommended to access it from there rather than
from the `LLMResult` object.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.outputs.chat_generation import (
ChatGeneration,
ChatGenerationChunk,
)
from langchain_core.outputs.chat_result import ChatResult
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.llm_result import LLMResult
from langchain_core.outputs.run_info import RunInfo
__all__ = [
"ChatGeneration",
"ChatGenerationChunk",
"ChatResult",
"Generation",
"GenerationChunk",
"LLMResult",
"RunInfo",
]
_dynamic_imports = {
"ChatGeneration": "chat_generation",
"ChatGenerationChunk": "chat_generation",
"ChatResult": "chat_result",
"Generation": "generation",
"GenerationChunk": "generation",
"LLMResult": "llm_result",
"RunInfo": "run_info",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""In memory document index."""
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import UpsertResponse
from langchain_core.indexing.base import DeleteResponse, DocumentIndex
@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
class InMemoryDocumentIndex(DocumentIndex):
"""In memory document index.
This is an in-memory document index that stores documents in a dictionary.
    It provides a simple search API that ranks documents by the number of
    times the given query string appears in each document.
.. versionadded:: 0.2.29
"""
store: dict[str, Document] = Field(default_factory=dict)
top_k: int = 4
def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
"""Upsert items into the index."""
ok_ids = []
for item in items:
if item.id is None:
id_ = str(uuid.uuid4())
item_ = item.model_copy()
item_.id = id_
else:
item_ = item
id_ = item.id
self.store[id_] = item_
ok_ids.append(cast("str", item_.id))
return UpsertResponse(succeeded=ok_ids, failed=[])
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []
for id_ in ids:
if id_ in self.store:
del self.store[id_]
ok_ids.append(id_)
return DeleteResponse(
succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
)
def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
"""Get by ids."""
return [self.store[id_] for id_ in ids if id_ in self.store]
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
counts_by_doc = []
for document in self.store.values():
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
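A minimal usage sketch of the count-based search above, assuming this beta API is available (the sample texts are illustrative):
from langchain_core.documents import Document

index = InMemoryDocumentIndex(top_k=2)
index.upsert([
    Document(page_content="cat cat dog"),
    Document(page_content="dog"),
])
# "cat" occurs twice in the first document, so it ranks first.
docs = index.invoke("cat")  # DocumentIndex is a retriever, so invoke() runs _get_relevant_documents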
|
"""In memory document index."""
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import UpsertResponse
from langchain_core.indexing.base import DeleteResponse, DocumentIndex
@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
class InMemoryDocumentIndex(DocumentIndex):
"""In memory document index.
This is an in-memory document index that stores documents in a dictionary.
    It provides a simple search API that ranks documents by the number of
    times the given query string appears in each document.
.. versionadded:: 0.2.29
"""
store: dict[str, Document] = Field(default_factory=dict)
top_k: int = 4
def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
"""Upsert items into the index."""
ok_ids = []
for item in items:
if item.id is None:
id_ = str(uuid.uuid4())
item_ = item.model_copy()
item_.id = id_
else:
item_ = item
id_ = item.id
self.store[id_] = item_
ok_ids.append(cast("str", item_.id))
return UpsertResponse(succeeded=ok_ids, failed=[])
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []
for id_ in ids:
if id_ in self.store:
del self.store[id_]
ok_ids.append(id_)
return DeleteResponse(
succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
)
def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
"""Get by ids."""
found_documents = []
for id_ in ids:
if id_ in self.store:
found_documents.append(self.store[id_])
return found_documents
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
counts_by_doc = []
for document in self.store.values():
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Speech processor class for Whisper
"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
r"""
Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
processor.
[`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See
the [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more information.
Args:
feature_extractor (`WhisperFeatureExtractor`):
An instance of [`WhisperFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`WhisperTokenizer`):
An instance of [`WhisperTokenizer`]. The tokenizer is a required input.
"""
feature_extractor_class = "WhisperFeatureExtractor"
tokenizer_class = ("WhisperTokenizer", "WhisperTokenizerFast")
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
def __call__(self, *args, **kwargs):
"""
Forwards the `audio` argument to WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] and the `text`
argument to [`~WhisperTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
information.
"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args, **kwargs)
audio = kwargs.pop("audio", None)
sampling_rate = kwargs.pop("sampling_rate", None)
text = kwargs.pop("text", None)
if len(args) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
if text is not None:
encodings = self.tokenizer(text, **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
def get_prompt_ids(self, text: str, return_tensors="np"):
return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
__all__ = ["WhisperProcessor"]
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Speech processor class for Whisper
"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
r"""
Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
processor.
[`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See
the [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more information.
Args:
feature_extractor (`WhisperFeatureExtractor`):
An instance of [`WhisperFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`WhisperTokenizer`):
An instance of [`WhisperTokenizer`]. The tokenizer is a required input.
"""
feature_extractor_class = "WhisperFeatureExtractor"
tokenizer_class = "WhisperTokenizer"
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
def __call__(self, *args, **kwargs):
"""
Forwards the `audio` argument to WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] and the `text`
argument to [`~WhisperTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
information.
"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args, **kwargs)
audio = kwargs.pop("audio", None)
sampling_rate = kwargs.pop("sampling_rate", None)
text = kwargs.pop("text", None)
if len(args) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
if text is not None:
encodings = self.tokenizer(text, **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
def get_prompt_ids(self, text: str, return_tensors="np"):
return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
__all__ = ["WhisperProcessor"]
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes the accuracy over all source sentences src_i, and likewise the accuracy for the opposite direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool of sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
As these have development sets.
Usage:
python examples/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
from sentence_transformers import SentenceTransformer, evaluation
import sys
import logging
from datasets import load_dataset
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
    if list(dataset.keys()) == ["train"]:
        num_samples = min(5000, len(dataset["train"]))
        datasets[f"train[:{num_samples}]"] = dataset["train"].select(range(num_samples))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
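For clarity, a hedged toy run of the same evaluator on two hand-written sentence pairs (the sentences are illustrative):
toy_evaluator = evaluation.TranslationEvaluator(
    ["Hello", "Good morning"],
    ["Hallo", "Guten Morgen"],
    name="toy",
    batch_size=2,
)
toy_evaluator(model)  # reports accuracy in both translation directions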
|
"""
Given a tab-separated file (.tsv) with parallel sentences, where the second column is the translation of the sentence in the first column, for example in the format:
src1 trg1
src2 trg2
...
where trg_i is the translation of src_i.
Given src_i, the TranslationEvaluator checks which trg_j has the highest similarity using cosine similarity. If i == j, we assume
a match, i.e., the correct translation has been found for src_i out of all possible target sentences.
It then computes the accuracy over all source sentences src_i, and likewise the accuracy for the opposite direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool of sentences.
Usage:
python [model_name_or_path] [parallel-file1] [parallel-file2] ...
For example:
python distiluse-base-multilingual-cased talks-en-de.tsv.gz
See the training_multilingual/get_parallel_data_...py scripts for getting parallel sentence data from different sources
"""
from sentence_transformers import SentenceTransformer, evaluation, LoggingHandler
import sys
import gzip
import os
import logging
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
model_name = sys.argv[1]
filepaths = sys.argv[2:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for filepath in filepaths:
src_sentences = []
trg_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, "r", encoding="utf8"
) as fIn:
for line in fIn:
splits = line.strip().split("\t")
if len(splits) >= 2:
src_sentences.append(splits[0])
trg_sentences.append(splits[1])
logger.info(os.path.basename(filepath) + ": " + str(len(src_sentences)) + " sentence pairs")
dev_trans_acc = evaluation.TranslationEvaluator(
src_sentences, trg_sentences, name=os.path.basename(filepath), batch_size=inference_batch_size
)
dev_trans_acc(model)
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio._internal import download_url_to_file
from torchaudio.datasets.utils import _extract_tar
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""*YesNo* :cite:`YesNo` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
_extract_tar(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
List[int]:
labels
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
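A minimal usage sketch, assuming network access for the download (the root directory is illustrative):
dataset = YESNO("./data", download=True)
waveform, sample_rate, labels = dataset[0]
# Each label list encodes the eight yes(1)/no(0) words spoken in the recording.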
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_tar
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""*YesNo* :cite:`YesNo` dataset.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
_extract_tar(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
List[int]:
labels
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg19 import VGG19 as VGG19
from keras.src.applications.vgg19 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.vgg19 import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg19 import VGG19
from keras.src.applications.vgg19 import decode_predictions
from keras.src.applications.vgg19 import preprocess_input
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples)
assert isinstance(tensors.points, np.ndarray)
assert isinstance(tensors.points, NdArray)
assert tensors.points.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(tensors.points, np.ndarray)
assert len(tensors.points.shape) == 3
assert tensors.points.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='PointCloud3DUrl'):
parse_obj_as(PointCloud3DUrl, path_to_file)
else:
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples)
assert isinstance(point_cloud, np.ndarray)
assert isinstance(point_cloud, NdArray)
assert point_cloud.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(point_cloud, np.ndarray)
assert len(point_cloud.shape) == 3
assert point_cloud.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='PointCloud3DUrl'):
parse_obj_as(PointCloud3DUrl, path_to_file)
else:
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import pytest
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.upstage import UpstageEmbedding
UPSTAGE_TEST_API_KEY = "upstage_test_key"
@pytest.fixture()
def upstage_embedding():
return pytest.importorskip(
"llama_index.embeddings.upstage", reason="Cannot import UpstageEmbedding"
).UpstageEmbedding
@pytest.fixture()
def setup_environment(monkeypatch):
monkeypatch.setenv("UPSTAGE_API_KEY", UPSTAGE_TEST_API_KEY)
def test_upstage_embedding_class(upstage_embedding):
names_of_base_classes = [b.__name__ for b in upstage_embedding.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
def test_upstage_embedding_fail_wrong_model(upstage_embedding):
with pytest.raises(ValueError):
upstage_embedding(model="foo")
def test_upstage_embedding_model_name(upstage_embedding):
embedding = upstage_embedding(model="embedding")
assert embedding._query_engine == "embedding-query"
embedding = UpstageEmbedding(model="solar-embedding-1-large")
assert embedding._query_engine == "solar-embedding-1-large-query"
def test_upstage_embedding_api_key_alias(upstage_embedding):
embedding1 = upstage_embedding(api_key=UPSTAGE_TEST_API_KEY)
embedding2 = upstage_embedding(upstage_api_key=UPSTAGE_TEST_API_KEY)
embedding3 = upstage_embedding(error_api_key=UPSTAGE_TEST_API_KEY)
assert embedding1.api_key == UPSTAGE_TEST_API_KEY
assert embedding2.api_key == UPSTAGE_TEST_API_KEY
assert embedding3.api_key == ""
def test_upstage_embedding_api_key_with_env(setup_environment, upstage_embedding):
embedding = upstage_embedding()
assert embedding.api_key == UPSTAGE_TEST_API_KEY
|
import pytest
from llama_index.core.base.embeddings.base import BaseEmbedding
UPSTAGE_TEST_API_KEY = "upstage_test_key"
@pytest.fixture()
def upstage_embedding():
return pytest.importorskip(
"llama_index.embeddings.upstage", reason="Cannot import UpstageEmbedding"
).UpstageEmbedding
@pytest.fixture()
def setup_environment(monkeypatch):
monkeypatch.setenv("UPSTAGE_API_KEY", UPSTAGE_TEST_API_KEY)
def test_upstage_embedding_class(upstage_embedding):
names_of_base_classes = [b.__name__ for b in upstage_embedding.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
def test_upstage_embedding_fail_wrong_model(upstage_embedding):
with pytest.raises(ValueError):
upstage_embedding(model="foo")
def test_upstage_embedding_api_key_alias(upstage_embedding):
embedding1 = upstage_embedding(api_key=UPSTAGE_TEST_API_KEY)
embedding2 = upstage_embedding(upstage_api_key=UPSTAGE_TEST_API_KEY)
embedding3 = upstage_embedding(error_api_key=UPSTAGE_TEST_API_KEY)
assert embedding1.api_key == UPSTAGE_TEST_API_KEY
assert embedding2.api_key == UPSTAGE_TEST_API_KEY
assert embedding3.api_key == ""
def test_upstage_embedding_api_key_with_env(setup_environment, upstage_embedding):
embedding = upstage_embedding()
assert embedding.api_key == UPSTAGE_TEST_API_KEY
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.xception import Xception as Xception
from keras.src.applications.xception import (
decode_predictions as decode_predictions,
)
from keras.src.applications.xception import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.xception import Xception
from keras.src.applications.xception import decode_predictions
from keras.src.applications.xception import preprocess_input
|
"""
=========================================
Label Propagation digits: Active learning
=========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples. Note you can
increase this to label more than 30 by changing `max_iterations`. Labeling
more than 30 can be useful to get a sense for the speed of convergence of
this active learning technique.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn import datasets
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.semi_supervised import LabelSpreading
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 40
max_iterations = 5
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(max_iterations):
if len(unlabeled_indices) == 0:
print("No unlabeled items left to label.")
break
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = LabelSpreading(gamma=0.25, max_iter=20)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Iteration %i %s" % (i, 70 * "_"))
print(
"Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)
)
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# select up to 5 digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[::-1]
uncertainty_index = uncertainty_index[
np.isin(uncertainty_index, unlabeled_indices)
][:5]
# keep track of indices that we get labels for
delete_indices = np.array([], dtype=int)
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
f.text(
0.05,
(1 - (i + 1) * 0.183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10),
size=10,
)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r, interpolation="none")
sub.set_title(
"predict: %i\ntrue: %i"
% (lp_model.transduction_[image_index], y[image_index]),
size=10,
)
sub.axis("off")
        # labeling 5 points: remove them from the unlabeled pool
(delete_index,) = (unlabeled_indices == image_index).nonzero()
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += len(uncertainty_index)
f.suptitle(
(
"Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model."
),
y=1.15,
)
plt.subplots_adjust(left=0.2, bottom=0.03, right=0.9, top=0.9, wspace=0.2, hspace=0.85)
plt.show()
|
"""
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples. Note you can
increase this to label more than 30 by changing `max_iterations`. Labeling
more than 30 can be useful to get a sense for the speed of convergence of
this active learning technique.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from sklearn import datasets
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.semi_supervised import LabelSpreading
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 40
max_iterations = 5
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(max_iterations):
if len(unlabeled_indices) == 0:
print("No unlabeled items left to label.")
break
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = LabelSpreading(gamma=0.25, max_iter=20)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Iteration %i %s" % (i, 70 * "_"))
print(
"Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)
)
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# select up to 5 digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[::-1]
uncertainty_index = uncertainty_index[
np.isin(uncertainty_index, unlabeled_indices)
][:5]
# keep track of indices that we get labels for
delete_indices = np.array([], dtype=int)
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
f.text(
0.05,
(1 - (i + 1) * 0.183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10),
size=10,
)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r, interpolation="none")
sub.set_title(
"predict: %i\ntrue: %i"
% (lp_model.transduction_[image_index], y[image_index]),
size=10,
)
sub.axis("off")
        # labeling 5 points: remove them from the unlabeled pool
(delete_index,) = (unlabeled_indices == image_index).nonzero()
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += len(uncertainty_index)
f.suptitle(
(
"Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model."
),
y=1.15,
)
plt.subplots_adjust(left=0.2, bottom=0.03, right=0.9, top=0.9, wspace=0.2, hspace=0.85)
plt.show()
|
from jina import Client
from docarray import DocList
from docarray.documents import TextDoc
if __name__ == '__main__':
c = Client(host='grpc://0.0.0.0:54321')
da = c.post('/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc])
print(da.text)
|
from jina import Client, DocumentArray
if __name__ == '__main__':
c = Client(host='grpc://0.0.0.0:54321')
da = c.post('/', DocumentArray.empty(2))
print(da.texts)
|
import numpy as np
import pytest
from docarray.documents import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
import numpy as np
import pytest
from docarray import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
from typing import Any, Dict, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = tv_tensors.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value]
class ClampBoundingBoxes(Transform):
"""Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
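A short usage sketch of the two transforms above (the box values are illustrative):
import torch
from torchvision import tv_tensors

boxes = tv_tensors.BoundingBoxes(
    torch.tensor([[10, 10, 20, 20]]),
    format=tv_tensors.BoundingBoxFormat.XYWH,
    canvas_size=(25, 25),
)
boxes = ConvertBoundingBoxFormat("XYXY")(boxes)  # -> [[10, 10, 30, 30]]
boxes = ClampBoundingBoxes()(boxes)  # x2/y2 clipped to the 25x25 canvas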
|
from typing import Any, Dict, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. v2betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = tv_tensors.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value]
class ClampBoundingBoxes(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
.. v2betastatus:: ClampBoundingBoxes transform
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
|
"""Utils for pretty print."""
import textwrap
from pprint import pprint
from typing import Any, Dict
from llama_index.core.base.response.schema import Response
from llama_index.core.schema import NodeWithScore
from llama_index.core.utils import truncate_text
def pprint_metadata(metadata: Dict[str, Any]) -> None:
"""Display metadata for jupyter notebook."""
pprint(metadata)
def pprint_source_node(
source_node: NodeWithScore, source_length: int = 350, wrap_width: int = 70
) -> None:
"""Display source node for jupyter notebook."""
source_text_fmt = truncate_text(
source_node.node.get_content().strip(), source_length
)
print(f"Node ID: {source_node.node.node_id}")
print(f"Similarity: {source_node.score}")
print(textwrap.fill(f"Text: {source_text_fmt}\n", width=wrap_width))
def pprint_response(
response: Response,
source_length: int = 350,
wrap_width: int = 70,
show_source: bool = False,
) -> None:
"""Pretty print response for jupyter notebook."""
if response.response is None:
response_text = "None"
else:
response_text = response.response.strip()
response_text = f"Final Response: {response_text}"
print(textwrap.fill(response_text, width=wrap_width))
if show_source:
for ind, source_node in enumerate(response.source_nodes):
print("_" * wrap_width)
print(f"Source Node {ind + 1}/{len(response.source_nodes)}")
pprint_source_node(
source_node, source_length=source_length, wrap_width=wrap_width
)
|
"""Utils for pretty print."""
import textwrap
from pprint import pprint
from typing import Any, Dict
from llama_index.core.base.response.schema import Response
from llama_index.core.schema import NodeWithScore
from llama_index.core.utils import truncate_text
def pprint_metadata(metadata: Dict[str, Any]) -> None:
"""Display metadata for jupyter notebook."""
pprint(metadata)
def pprint_source_node(
source_node: NodeWithScore, source_length: int = 350, wrap_width: int = 70
) -> None:
"""Display source node for jupyter notebook."""
source_text_fmt = truncate_text(
source_node.node.get_content().strip(), source_length
)
print(f"Node ID: {source_node.node.node_id}")
print(f"Similarity: {source_node.score}")
print(textwrap.fill(f"Text: {source_text_fmt}\n", width=wrap_width))
def pprint_response(
response: Response,
source_length: int = 350,
wrap_width: int = 70,
show_source: bool = False,
) -> None:
"""Pretty print response for jupyter notebook."""
if response.response is None:
response_text = "None"
else:
response_text = response.response.strip()
response_text = f"Final Response: {response_text}"
print(textwrap.fill(response_text, width=wrap_width))
if show_source:
for ind, source_node in enumerate(response.source_nodes):
print("_" * wrap_width)
print(f"Source Node {ind + 1}/{len(response.source_nodes)}")
pprint_source_node(
source_node, source_length=source_length, wrap_width=wrap_width
)
|
import logging
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
import torch
import torchaudio
from lightning import ConformerRNNTModule
from transforms import get_data_module
logger = logging.getLogger()
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def run_eval(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
model = ConformerRNNTModule.load_from_checkpoint(args.checkpoint_path, sp_model=sp_model).eval()
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
if args.use_cuda:
model = model.to(device="cuda")
total_edit_distance = 0
total_length = 0
dataloader = data_module.test_dataloader()
with torch.no_grad():
for idx, (batch, sample) in enumerate(dataloader):
actual = sample[0][2]
predicted = model(batch)
total_edit_distance += compute_word_level_distance(actual, predicted)
total_length += len(actual.split())
if idx % 100 == 0:
logger.warning(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.warning(f"Final WER: {total_edit_distance / total_length}")
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
required=True,
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--use-cuda",
action="store_true",
default=False,
help="Run using CUDA.",
)
args = parser.parse_args()
run_eval(args)
if __name__ == "__main__":
cli_main()
|
import logging
import pathlib
from argparse import ArgumentParser
import torch
import torchaudio
from lightning import ConformerRNNTModule
from transforms import get_data_module
logger = logging.getLogger()
def compute_word_level_distance(seq1, seq2):
return torchaudio.functional.edit_distance(seq1.lower().split(), seq2.lower().split())
def run_eval(args):
model = ConformerRNNTModule.load_from_checkpoint(args.checkpoint_path, sp_model_path=str(args.sp_model_path)).eval()
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
if args.use_cuda:
model = model.to(device="cuda")
total_edit_distance = 0
total_length = 0
dataloader = data_module.test_dataloader()
with torch.no_grad():
for idx, (batch, sample) in enumerate(dataloader):
actual = sample[0][2]
predicted = model(batch)
total_edit_distance += compute_word_level_distance(actual, predicted)
total_length += len(actual.split())
if idx % 100 == 0:
logger.warning(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.warning(f"Final WER: {total_edit_distance / total_length}")
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
required=True,
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--use-cuda",
action="store_true",
default=False,
help="Run using CUDA.",
)
args = parser.parse_args()
run_eval(args)
if __name__ == "__main__":
cli_main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetMaskHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if conv_to_res:
assert self.conv_kernel_size == 3
self.num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
self.num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
|
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetMaskHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if conv_to_res:
assert self.conv_kernel_size == 3
self.num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
self.num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
|
import logging
import os
import sys
from torchaudio._internal.module_utils import eval_env, fail_with_message, is_module_available, no_op
from .utils import (
_check_cuda_version,
_fail_since_no_sox,
_init_dll_path,
_init_ffmpeg,
_init_sox,
_LazyImporter,
_load_lib,
)
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purpose, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"lazy_import_ffmpeg_ext",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Initialize libsox-related features
_SOX_INITIALIZED = False
_USE_SOX = False if os.name == "nt" else eval_env("TORCHAUDIO_USE_SOX", True)
_SOX_MODULE_AVAILABLE = is_module_available("torchaudio.lib._torchaudio_sox")
if _USE_SOX and _SOX_MODULE_AVAILABLE:
try:
_init_sox()
_SOX_INITIALIZED = True
except Exception:
# The initialization of sox extension will fail if supported sox
# libraries are not found in the system.
# Since the rest of torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize sox extension", exc_info=True)
if os.name == "nt":
fail_if_no_sox = fail_with_message("requires sox extension, which is not supported on Windows.")
elif not _USE_SOX:
fail_if_no_sox = fail_with_message("requires sox extension, but it is disabled. (TORCHAUDIO_USE_SOX=0)")
elif not _SOX_MODULE_AVAILABLE:
fail_if_no_sox = fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. "
"Please build TorchAudio with libsox support. (BUILD_SOX=1)"
)
else:
fail_if_no_sox = no_op if _SOX_INITIALIZED else _fail_since_no_sox
_FFMPEG_EXT = None
def lazy_import_ffmpeg_ext():
    """Lazily load the FFmpeg integration, based on availability."""
global _FFMPEG_EXT
if _FFMPEG_EXT is None:
_FFMPEG_EXT = _LazyImporter("_torchaudio_ffmpeg", _init_ffmpeg)
return _FFMPEG_EXT
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
    else fail_with_message(
        "requires alignment extension, but TorchAudio is not compiled with it. "
        "Please build TorchAudio with alignment support."
)
)
|
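# Usage sketch for `lazy_import_ffmpeg_ext` above: the returned
# `_LazyImporter` defers `_init_ffmpeg()` until the first attribute access,
# so `import torchaudio` stays cheap when FFmpeg libraries are absent.
# (The attribute access below is illustrative, not a guaranteed symbol of
# the extension module.)
ffmpeg_ext = lazy_import_ffmpeg_ext()  # cheap: nothing is loaded yet
# ffmpeg_ext.some_symbol  # first access would trigger _init_ffmpeg()
|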
import logging
import os
import sys
from torchaudio._internal.module_utils import eval_env, fail_with_message, is_module_available, no_op
try:
from .fb import _init_ffmpeg
except ImportError:
from .utils import _init_ffmpeg
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _fail_since_no_sox, _init_dll_path, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purposes, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_EXT",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Initialize libsox-related features
_SOX_INITIALIZED = False
_USE_SOX = False if os.name == "nt" else eval_env("TORCHAUDIO_USE_SOX", True)
_SOX_MODULE_AVAILABLE = is_module_available("torchaudio.lib._torchaudio_sox")
if _USE_SOX and _SOX_MODULE_AVAILABLE:
try:
_init_sox()
_SOX_INITIALIZED = True
except Exception:
# The initialization of sox extension will fail if supported sox
# libraries are not found in the system.
# Since the rest of torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize sox extension", exc_info=True)
if os.name == "nt":
fail_if_no_sox = fail_with_message("requires sox extension, which is not supported on Windows.")
elif not _USE_SOX:
fail_if_no_sox = fail_with_message("requires sox extension, but it is disabled. (TORCHAUDIO_USE_SOX=0)")
elif not _SOX_MODULE_AVAILABLE:
fail_if_no_sox = fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. "
"Please build TorchAudio with libsox support. (BUILD_SOX=1)"
)
else:
fail_if_no_sox = no_op if _SOX_INITIALIZED else _fail_since_no_sox
# Initialize FFmpeg-related features
_FFMPEG_EXT = None
_USE_FFMPEG = eval_env("TORCHAUDIO_USE_FFMPEG", True)
if _USE_FFMPEG and _IS_TORCHAUDIO_EXT_AVAILABLE:
try:
_FFMPEG_EXT = _init_ffmpeg()
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
# Since the rest of torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
if _USE_FFMPEG:
fail_if_no_ffmpeg = _fail_since_no_ffmpeg if _FFMPEG_EXT is None else no_op
else:
fail_if_no_ffmpeg = fail_with_message("requires ffmpeg extension, but it is disabled. (TORCHAUDIO_USE_FFMPEG=0)")
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
    else fail_with_message(
        "requires alignment extension, but TorchAudio is not compiled with it. "
        "Please build TorchAudio with alignment support."
)
)
|
from jina.serve.runtimes.gateway.gateway import BaseGateway
class PlaceHolderGateway(BaseGateway):
pass
|
from jina.serve.runtimes.gateway.gateway import BaseGateway
class PlaceHolderGateway(BaseGateway):
pass
|
_base_ = './faster-rcnn_hrnetv2p-w32-1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import InstanceList, OptConfigType, OptMultiConfig
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.
        Returns:
            list[:obj:`InstanceData`]: Detection results of each image.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
            - masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
        # TODO: The nms op in mmcv needs to be enhanced; the bbox results
        # may differ when rescale is not applied in bbox_head.
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x,
batch_img_metas,
rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x, batch_img_metas, results_list, rescale=rescale)
return results_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Tuple
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils import (InstanceList, OptConfigType, OptMultiConfig,
SampleList)
from mmdet.registry import MODELS
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor: OptMultiConfig = None,
bbox_head: OptMultiConfig = None,
mask_roi_extractor: OptMultiConfig = None,
mask_head: OptMultiConfig = None,
shared_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
self.shared_head = MODELS.build(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self) -> bool:
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self) -> bool:
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self) -> bool:
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self, *args, **kwargs):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self, *args, **kwargs):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self, *args, **kwargs):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList):
"""Perform forward propagation and loss calculation of the roi head on
the features of the upstream network."""
def predict(self,
x: Tuple[Tensor],
rpn_results_list: InstanceList,
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the roi head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (N, C, H, W).
rpn_results_list (list[:obj:`InstanceData`]): list of region
proposals.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results to
                the original image. Defaults to False.
        Returns:
            list[:obj:`InstanceData`]: Detection results of each image.
            Each item usually contains the following keys.
            - scores (Tensor): Classification scores, has a shape
                (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
                (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
                the last dimension 4 arranged as (x1, y1, x2, y2).
            - masks (Tensor): Has a shape (num_instances, H, W).
"""
assert self.with_bbox, 'Bbox head must be implemented.'
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
        # TODO: The nms op in mmcv needs to be enhanced; the bbox results
        # may differ when rescale is not applied in bbox_head.
# If it has the mask branch, the bbox branch does not need
# to be scaled to the original image scale, because the mask
# branch will scale both bbox and mask at the same time.
bbox_rescale = rescale if not self.with_mask else False
results_list = self.predict_bbox(
x,
batch_img_metas,
rpn_results_list,
rcnn_test_cfg=self.test_cfg,
rescale=bbox_rescale)
if self.with_mask:
results_list = self.predict_mask(
x, batch_img_metas, results_list, rescale=rescale)
return results_list
|
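# BaseRoIHead above leaves `init_bbox_head`, `init_mask_head`,
# `init_assigner_sampler`, and `loss` abstract. A minimal sketch of a
# concrete subclass (hedged: a usable head would also need the
# `predict_bbox`/`predict_mask` methods that `predict` calls, as
# e.g. StandardRoIHead provides):
from mmdet.registry import MODELS
@MODELS.register_module()
class MinimalRoIHead(BaseRoIHead):
    def init_bbox_head(self, bbox_roi_extractor, bbox_head):
        self.bbox_roi_extractor = MODELS.build(bbox_roi_extractor)
        self.bbox_head = MODELS.build(bbox_head)
    def init_mask_head(self, mask_roi_extractor, mask_head):
        self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
        self.mask_head = MODELS.build(mask_head)
    def init_assigner_sampler(self):
        # A real head builds its assigner/sampler from self.train_cfg here.
        self.bbox_assigner = None
        self.bbox_sampler = None
    def loss(self, x, rpn_results_list, batch_data_samples):
        raise NotImplementedError('training logic omitted in this sketch')
|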
import copy
from pathlib import Path
import clip
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray, Executor
from ...clip_text import CLIPTextEncoder
@pytest.fixture(scope="module")
def encoder() -> CLIPTextEncoder:
return CLIPTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.pretrained_model_name_or_path == 'openai/clip-vit-base-patch32'
def test_no_documents(encoder: CLIPTextEncoder):
docs = DocumentArray()
    encoder.encode(docs=docs, parameters={})
assert len(docs) == 0
def test_none_docs(encoder: CLIPTextEncoder):
encoder.encode(docs=None, parameters={})
def test_docs_no_texts(encoder: CLIPTextEncoder):
docs = DocumentArray([Document()])
    encoder.encode(docs=docs, parameters={})
assert len(docs) == 1
assert docs[0].embedding is None
def test_compute_tokens(encoder: CLIPTextEncoder):
tokens = encoder._generate_input_tokens(
["hello this is a test", "and another test"]
)
assert tokens["input_ids"].shape == (2, 7)
assert tokens["attention_mask"].shape == (2, 7)
def test_encoding_cpu():
encoder = CLIPTextEncoder(device="cpu")
input_data = DocumentArray([Document(text="hello world")])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (512,)
@pytest.mark.parametrize("batch_size", [1, 2, 4, 8])
def test_batch_size(encoder: CLIPTextEncoder, batch_size: int):
text = "Jina is Lit"
docs = DocumentArray([Document(text=text) for _ in range(32)])
encoder.encode(docs, parameters={"batch_size": batch_size})
for doc in docs:
assert doc.embedding.shape == (512,)
def test_encodes_semantic_meaning():
"""
    Check that the distance between embeddings of similar sentences is
    smaller than that between dissimilar pairs of sentences.
"""
docs = DocumentArray(
[
            Document(id="A", text="a furry animal with a long tail"),
Document(id="B", text="a domesticated mammal with four legs"),
Document(id="C", text="a type of aircraft that uses rotating wings"),
Document(id="D", text="flying vehicle that has fixed wings and engines"),
]
)
clip_text_encoder = CLIPTextEncoder()
clip_text_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ["B", "A", "D", "C"]
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
def test_openai_embed_match():
docs = []
sentences = [
"Jina AI is lit",
"Jina AI is great",
"Jina AI is a cloud-native neural search company",
"Jina AI is a github repo",
"Jina AI is an open source neural search project",
]
for sentence in sentences:
docs.append(Document(text=sentence))
clip_text_encoder = CLIPTextEncoder("openai/clip-vit-base-patch32")
clip_text_encoder.encode(DocumentArray(docs), {})
txt_to_ndarray = {}
for d in docs:
txt_to_ndarray[d.text] = d.embedding
# assert same results with OpenAI's implementation
model, preprocess = clip.load("ViT-B/32", device="cpu")
assert len(txt_to_ndarray) == 5
for text, actual_embedding in txt_to_ndarray.items():
with torch.no_grad():
tokens = clip.tokenize(text)
expected_embedding = model.encode_text(tokens).detach().numpy().flatten()
np.testing.assert_almost_equal(actual_embedding, expected_embedding, 5)
def test_traversal_path():
text = "blah"
docs = DocumentArray([Document(id="root1", text=text)])
docs[0].chunks = [
Document(id="chunk11", text=text),
Document(id="chunk12", text=text),
Document(id="chunk13", text=text),
]
docs[0].chunks[0].chunks = [
Document(id="chunk111", text=text),
Document(id="chunk112", text=text),
]
encoder = CLIPTextEncoder(default_traversal_paths=["c"], model_name="ViT-B/32")
original_docs = copy.deepcopy(docs)
encoder.encode(docs=docs, parameters={}, return_results=True)
for path, count in [["r", 0], ["c", 3], ["cc", 0]]:
assert len(docs.traverse_flat([path]).get_attributes("embedding")) == count
encoder.encode(
docs=original_docs, parameters={"traversal_paths": ["cc"]}, return_results=True
)
for path, count in [["r", 0], ["c", 0], ["cc", 2]]:
assert (
len(original_docs.traverse_flat([path]).get_attributes("embedding"))
== count
)
|
import copy
import clip
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from ...clip_text import CLIPTextEncoder
@pytest.fixture(scope="module")
def encoder() -> CLIPTextEncoder:
return CLIPTextEncoder()
def test_no_documents(encoder: CLIPTextEncoder):
docs = DocumentArray()
    encoder.encode(docs=docs, parameters={})
assert len(docs) == 0
def test_none_docs(encoder: CLIPTextEncoder):
encoder.encode(docs=None, parameters={})
def test_docs_no_texts(encoder: CLIPTextEncoder):
docs = DocumentArray([Document()])
    encoder.encode(docs=docs, parameters={})
assert len(docs) == 1
assert docs[0].embedding is None
def test_compute_tokens(encoder: CLIPTextEncoder):
tokens = encoder._generate_input_tokens(
["hello this is a test", "and another test"]
)
assert tokens["input_ids"].shape == (2, 7)
assert tokens["attention_mask"].shape == (2, 7)
def test_encoding_cpu():
encoder = CLIPTextEncoder(device="cpu")
input_data = DocumentArray([Document(text="hello world")])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (512,)
@pytest.mark.parametrize("batch_size", [1, 2, 4, 8])
def test_batch_size(encoder: CLIPTextEncoder, batch_size: int):
text = "Jina is Lit"
docs = DocumentArray([Document(text=text) for _ in range(32)])
encoder.encode(docs, parameters={"batch_size": batch_size})
for doc in docs:
assert doc.embedding.shape == (512,)
def test_encodes_semantic_meaning():
"""
    Check that the distance between embeddings of similar sentences is
    smaller than that between dissimilar pairs of sentences.
"""
docs = DocumentArray(
[
            Document(id="A", text="a furry animal with a long tail"),
Document(id="B", text="a domesticated mammal with four legs"),
Document(id="C", text="a type of aircraft that uses rotating wings"),
Document(id="D", text="flying vehicle that has fixed wings and engines"),
]
)
clip_text_encoder = CLIPTextEncoder()
clip_text_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ["B", "A", "D", "C"]
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
def test_openai_embed_match():
docs = []
sentences = [
"Jina AI is lit",
"Jina AI is great",
"Jina AI is a cloud-native neural search company",
"Jina AI is a github repo",
"Jina AI is an open source neural search project",
]
for sentence in sentences:
docs.append(Document(text=sentence))
clip_text_encoder = CLIPTextEncoder("openai/clip-vit-base-patch32")
clip_text_encoder.encode(DocumentArray(docs), {})
txt_to_ndarray = {}
for d in docs:
txt_to_ndarray[d.text] = d.embedding
# assert same results with OpenAI's implementation
model, preprocess = clip.load("ViT-B/32", device="cpu")
assert len(txt_to_ndarray) == 5
for text, actual_embedding in txt_to_ndarray.items():
with torch.no_grad():
tokens = clip.tokenize(text)
expected_embedding = model.encode_text(tokens).detach().numpy().flatten()
np.testing.assert_almost_equal(actual_embedding, expected_embedding, 5)
def test_traversal_path():
text = "blah"
docs = DocumentArray([Document(id="root1", text=text)])
docs[0].chunks = [
Document(id="chunk11", text=text),
Document(id="chunk12", text=text),
Document(id="chunk13", text=text),
]
docs[0].chunks[0].chunks = [
Document(id="chunk111", text=text),
Document(id="chunk112", text=text),
]
encoder = CLIPTextEncoder(default_traversal_paths=["c"], model_name="ViT-B/32")
original_docs = copy.deepcopy(docs)
encoder.encode(docs=docs, parameters={}, return_results=True)
for path, count in [["r", 0], ["c", 3], ["cc", 0]]:
assert len(docs.traverse_flat([path]).get_attributes("embedding")) == count
encoder.encode(
docs=original_docs, parameters={"traversal_paths": ["cc"]}, return_results=True
)
for path, count in [["r", 0], ["c", 0], ["cc", 2]]:
assert (
len(original_docs.traverse_flat([path]).get_attributes("embedding"))
== count
)
|
import numpy as np
import pytest
from docarray import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
import numpy as np
from docarray import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='RPN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=None,
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
# model settings
model = dict(
type='RPN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=None,
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.24.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
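# Worked examples for `parse_version_info` above: numeric components become
# ints and a release-candidate suffix is kept as a string.
assert parse_version_info('2.24.0') == (2, 24, 0)
assert parse_version_info('2.25.0rc1') == (2, 25, 0, 'rc1')
|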
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.23.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from typing import Dict, Iterable, Sequence
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayRedis``"""
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from redis
"""
try:
result = self._client.hgetall(self._doc_prefix + _id)
doc = Document.from_base64(result[b'blob'])
return doc
except Exception as ex:
raise KeyError(_id) from ex
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
pipe = self._client.pipeline()
for id in ids:
pipe.hgetall(self._doc_prefix + id)
results = pipe.execute()
for i, result in enumerate(results):
if result:
accumulated_docs.append(Document.from_base64(result[b'blob']))
else:
accumulated_docs_id_not_found.append(ids[i])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
self._del_doc_by_id(_id)
if _id != value.id:
self._del_doc_by_id(value.id)
payload = self._document_to_redis(value)
self._client.hset(self._doc_prefix + value.id, mapping=payload)
    def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
        """Overridden implementation of ``_set_docs_by_ids`` that adds docs in
        batches and flushes at the end.
        :param ids: the ids used for indexing
        :param docs: the documents to store
        :param mismatch_ids: map of the ids that do not match their document's id
        """
for _id, doc in zip(ids, docs):
self._del_doc_by_id(_id)
if _id != doc.id:
self._del_doc_by_id(doc.id)
self._upload_batch(docs)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(self._doc_prefix + _id)
def _document_to_redis(self, doc: 'Document') -> Dict:
extra_columns = {}
for col, _ in self._config.columns.items():
tag = doc.tags.get(col)
if tag is not None:
extra_columns[col] = int(tag) if isinstance(tag, bool) else tag
if self._config.tag_indices:
for index in self._config.tag_indices:
text = doc.tags.get(index)
if text is not None:
extra_columns[index] = text
payload = {
'id': doc.id,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if doc.text:
payload['text'] = doc.text
return payload
def _load_offset2ids(self):
if self._list_like:
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids, list_like=self._list_like)
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.ft(index_name=self._config.index_name).dropindex(
delete_documents=True
)
self._build_index(rebuild=True)
self._client.delete(self._offset2id_key)
|
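# The pipelined fetch in `_get_docs_by_ids` above batches all HGETALL calls
# into one network round trip. A standalone sketch of the same pattern
# (assumes a reachable Redis instance via redis-py; key names are
# placeholders):
import redis
client = redis.Redis()
pipe = client.pipeline()
for key in ('doc:a', 'doc:b'):
    pipe.hgetall(key)  # queued locally, not yet sent
results = pipe.execute()  # one round trip, one reply per HGETALL
|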
from typing import Dict, Iterable, Sequence
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayRedis``"""
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from redis
"""
try:
result = self._client.hgetall(self._doc_prefix + _id)
doc = Document.from_base64(result[b'blob'])
return doc
except Exception as ex:
raise KeyError(_id) from ex
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
pipe = self._client.pipeline()
for id in ids:
pipe.hgetall(self._doc_prefix + id)
results = pipe.execute()
for i, result in enumerate(results):
if result:
accumulated_docs.append(Document.from_base64(result[b'blob']))
else:
accumulated_docs_id_not_found.append(ids[i])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
self._del_doc_by_id(_id)
if _id != value.id:
self._del_doc_by_id(value.id)
payload = self._document_to_redis(value)
self._client.hset(self._doc_prefix + value.id, mapping=payload)
    def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
        """Overridden implementation of ``_set_docs_by_ids`` that adds docs in
        batches and flushes at the end.
        :param ids: the ids used for indexing
        :param docs: the documents to store
        :param mismatch_ids: map of the ids that do not match their document's id
        """
for _id, doc in zip(ids, docs):
self._del_doc_by_id(_id)
if _id != doc.id:
self._del_doc_by_id(doc.id)
self._upload_batch(docs)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(self._doc_prefix + _id)
def _document_to_redis(self, doc: 'Document') -> Dict:
extra_columns = {}
for col, _ in self._config.columns.items():
tag = doc.tags.get(col)
if tag is not None:
extra_columns[col] = int(tag) if isinstance(tag, bool) else tag
if self._config.tag_indices:
for index in self._config.tag_indices:
text = doc.tags.get(index)
if text is not None:
extra_columns[index] = text
payload = {
'id': doc.id,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if doc.text:
payload['text'] = doc.text
return payload
def _load_offset2ids(self):
if self._list_like:
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids, list_like=self._list_like)
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.ft(index_name=self._config.index_name).dropindex(
delete_documents=True
)
self._client.delete(self._offset2id_key)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import Compose
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class PipelineSwitchHook(Hook):
"""Switch data pipeline at switch_epoch.
Args:
switch_epoch (int): switch pipeline at this epoch.
switch_pipeline (list[dict]): the pipeline to switch to.
"""
def __init__(self, switch_epoch, switch_pipeline):
self.switch_epoch = switch_epoch
self.switch_pipeline = switch_pipeline
self._restart_dataloader = False
self._has_switched = False
def before_train_epoch(self, runner):
"""switch pipeline."""
epoch = runner.epoch
train_loader = runner.train_dataloader
if epoch >= self.switch_epoch and not self._has_switched:
runner.logger.info('Switch pipeline now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.pipeline = Compose(self.switch_pipeline)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
self._has_switched = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
|
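# Sketch of how PipelineSwitchHook above is typically wired into a config
# (hedged: the epoch and the pipeline entries below are placeholders;
# RTMDet-style configs switch to a simpler pipeline late in training):
custom_hooks = [
    dict(
        type='PipelineSwitchHook',
        switch_epoch=280,
        switch_pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(type='PackDetInputs'),
        ])
]
|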
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import Compose
from mmengine.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class PipelineSwitchHook(Hook):
"""Switch data pipeline at switch_epoch.
Args:
switch_epoch (int): switch pipeline at this epoch.
switch_pipeline (list[dict]): the pipeline to switch to.
"""
def __init__(self, switch_epoch, switch_pipeline):
self.switch_epoch = switch_epoch
self.switch_pipeline = switch_pipeline
self._restart_dataloader = False
def before_train_epoch(self, runner):
"""switch pipeline."""
epoch = runner.epoch
train_loader = runner.train_dataloader
if epoch == self.switch_epoch:
runner.logger.info('Switch pipeline now!')
# The dataset pipeline cannot be updated when persistent_workers
# is True, so we need to force the dataloader's multi-process
# restart. This is a very hacky approach.
train_loader.dataset.pipeline = Compose(self.switch_pipeline)
if hasattr(train_loader, 'persistent_workers'
) and train_loader.persistent_workers is True:
train_loader._DataLoader__initialized = False
train_loader._iterator = None
self._restart_dataloader = True
else:
# Once the restart is complete, we need to restore
# the initialization flag.
if self._restart_dataloader:
train_loader._DataLoader__initialized = True
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
    'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
    'RTMDet', 'Detectron2Wrapper', 'CrowdDet'
]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')]
visualizer = dict(vis_backends=vis_backends)
# MMEngine supports the following two ways; users can choose
# according to convenience.
# default_hooks = dict(checkpoint=dict(interval=4))
_base_.default_hooks.checkpoint.interval = 4
# train_cfg = dict(val_interval=2)
_base_.train_cfg.val_interval = 2
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
vis_backends = [dict(type='LocalVisBackend'), dict(type='WandBVisBackend')]
visualizer = dict(vis_backends=vis_backends)
# MMEngine supports the following two ways; users can choose
# according to convenience.
# default_hooks = dict(checkpoint=dict(interval=4))
_base_.default_hooks.checkpoint.interval = 4
# train_cfg = dict(val_interval=2)
_base_.train_cfg.val_interval = 2
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_delete_from_hub(
temporary_repo, hf_api, hf_token, csv_path, tmp_path, ci_hub_config, ci_hfh_hf_hub_url
) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
readme_path = tmp_path / "README.md"
readme_path.write_text(
dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""")
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(readme_path),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
            pr_url="https://hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=b"---\nconfigs:\n- config_name: cats\n data_files:\n - split: train\n path: cats/train/*\n---\n",
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
|
import numpy as np
import scipy.linalg as sl
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.numpy.core import convert_to_tensor
def cholesky(a):
return np.linalg.cholesky(a)
def det(a):
return np.linalg.det(a)
def eig(a):
return np.linalg.eig(a)
def eigh(a):
return np.linalg.eigh(a)
def inv(a):
return np.linalg.inv(a)
def lu_factor(a):
if a.ndim == 2:
return sl.lu_factor(a)
m, n = a.shape[-2:]
signature = "(m,n) -> (m,n), "
signature += "(m)" if m <= n else "(n)"
_lu_factor_gufunc = np.vectorize(
sl.lu_factor,
signature=signature,
)
return _lu_factor_gufunc(a)
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if "int" in dtype or dtype == "bool":
dtype = dtypes.result_type(x.dtype, "float32")
return np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims).astype(
dtype
)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return np.linalg.qr(x, mode=mode)
def solve(a, b):
return np.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if a.ndim == 2:
return sl.solve_triangular(a, b, lower=lower)
_vectorized_solve_triangular = np.vectorize(
lambda a, b: sl.solve_triangular(a, b, lower=lower),
signature="(n,n),(n,m)->(n,m)",
)
if b.ndim == a.ndim - 1:
b = np.expand_dims(b, axis=-1)
return _vectorized_solve_triangular(a, b).squeeze(axis=-1)
return _vectorized_solve_triangular(a, b)
def svd(x, full_matrices=True, compute_uv=True):
return np.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return np.linalg.lstsq(a, b, rcond=rcond)[0]
|
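# Quick sanity check for the batched `solve_triangular` path above (a
# minimal sketch; shapes are arbitrary). With a.ndim > 2 the call routes
# through the np.vectorize gufunc, and a 2-D `b` is expanded to a column
# and squeezed back:
import numpy as np
a = np.tril(np.random.rand(4, 3, 3)) + np.eye(3)  # batch of lower-triangular systems
b = np.random.rand(4, 3)
x = solve_triangular(a, b, lower=True)
np.testing.assert_allclose(np.einsum('bij,bj->bi', a, x), b, rtol=1e-5)
|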
import numpy as np
import scipy.linalg as sl
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.numpy.core import convert_to_tensor
def cholesky(a):
return np.linalg.cholesky(a)
def det(a):
return np.linalg.det(a)
def eig(a):
return np.linalg.eig(a)
def eigh(a):
return np.linalg.eigh(a)
def inv(a):
return np.linalg.inv(a)
def lu_factor(a):
if a.ndim == 2:
return sl.lu_factor(a)
m, n = a.shape[-2:]
signature = "(m,n) -> (m,n), "
signature += "(m)" if m <= n else "(n)"
_lu_factor_gufunc = np.vectorize(
sl.lu_factor,
signature=signature,
)
return _lu_factor_gufunc(a)
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if "int" in dtype or dtype == "bool":
dtype = dtypes.result_type(x.dtype, "float32")
return np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims).astype(
dtype
)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return np.linalg.qr(x, mode=mode)
def solve(a, b):
return np.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if a.ndim == 2:
return sl.solve_triangular(a, b, lower=lower)
_vectorized_solve_triangular = np.vectorize(
lambda a, b: sl.solve_triangular(a, b, lower=lower),
signature="(n,n),(n,m)->(n,m)",
)
if b.ndim == a.ndim - 1:
b = np.expand_dims(b, axis=-1)
return _vectorized_solve_triangular(a, b).squeeze(axis=-1)
return _vectorized_solve_triangular(a, b)
def svd(x, full_matrices=True, compute_uv=True):
return np.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
import mmcv
import torch
from mmcv.runner import load_checkpoint
from mmdet.registry import MODELS
from .. import build_detector
from .single_stage import SingleStageDetector
@MODELS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (str | dict): Config file path
or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_config,
teacher_ckpt=None,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = mmcv.Config.fromfile(teacher_config)
self.teacher_model = build_detector(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(img)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
    def cuda(self, device=None):
        """Since teacher_model is registered as a plain object, it is necessary
        to move the teacher model to CUDA when calling the ``cuda`` function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def train(self, mode=True):
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name, value):
"""Set attribute, i.e. self.name = value
        This overload prevents the teacher model from being registered as an
        nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
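# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal, self-contained demo of the ``__setattr__`` trick above: assigning
# the teacher via ``object.__setattr__`` bypasses nn.Module's registration, so
# the teacher's parameters never reach the student's optimizer. All names
# below are hypothetical.
from torch import nn

class _Student(nn.Module):
    def __init__(self, teacher):
        super().__init__()
        self.layer = nn.Linear(4, 4)
        # Stored as a plain attribute, not as a registered submodule.
        object.__setattr__(self, 'teacher', teacher)

_student = _Student(teacher=nn.Linear(4, 4))
assert len(list(_student.parameters())) == 2  # only self.layer's weight + bias
assert 'teacher' not in dict(_student.named_modules())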
|
# Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
import mmcv
import torch
from mmcv.runner import load_checkpoint
from .. import build_detector
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (str | dict): Config file path
or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_config,
teacher_ckpt=None,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = mmcv.Config.fromfile(teacher_config)
self.teacher_model = build_detector(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item is the ground-truth boxes for
                each image in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(img)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
def cuda(self, device=None):
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling cuda function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def train(self, mode=True):
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name, value):
"""Set attribute, i.e. self.name = value
        This overload prevents the teacher model from being registered as an
        nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import nn
from ..utils.dist_utils import all_reduce_dict
def get_norm_states(module):
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for k, v in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_states
@HOOKS.register_module()
class SyncNormHook(Hook):
"""Synchronize Norm states after training epoch, currently used in YOLOX.
Args:
num_last_epochs (int): The number of latter epochs in the end of the
training to switch to synchronizing norm interval. Default: 15.
interval (int): Synchronizing norm interval. Default: 1.
"""
def __init__(self, num_last_epochs=15, interval=1):
self.interval = interval
self.num_last_epochs = num_last_epochs
def before_train_epoch(self, runner):
epoch = runner.epoch
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
# Synchronize norm every epoch.
self.interval = 1
def after_train_epoch(self, runner):
"""Synchronizing norm."""
epoch = runner.epoch
module = runner.model
if (epoch + 1) % self.interval == 0:
_, world_size = get_dist_info()
if world_size == 1:
return
norm_states = get_norm_states(module)
norm_states = all_reduce_dict(norm_states, op='mean')
module.load_state_dict(norm_states, strict=False)
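# --- Editor's illustrative sketch (not part of the original file) ---
# How ``get_norm_states`` above collects the buffers to synchronize: every
# norm layer deriving from ``nn.modules.batchnorm._NormBase`` (BatchNorm1d/2d/3d,
# SyncBatchNorm) carries its running statistics in ``state_dict()``, keyed
# here by the module's dotted path inside the model.
_demo_model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
_demo_states = OrderedDict()
for _name, _child in _demo_model.named_modules():
    if isinstance(_child, nn.modules.batchnorm._NormBase):
        for _k, _v in _child.state_dict().items():
            _demo_states['.'.join([_name, _k])] = _v
assert '1.running_mean' in _demo_states and '1.running_var' in _demo_states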
|
from collections import OrderedDict
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import nn
from ..utils.dist_utils import all_reduce_dict
def get_norm_states(module):
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for k, v in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_states
@HOOKS.register_module()
class SyncNormHook(Hook):
"""Synchronize Norm states after training epoch, currently used in YOLOX.
Args:
num_last_epochs (int): The number of latter epochs in the end of the
training to switch to synchronizing norm interval. Default: 15.
interval (int): Synchronizing norm interval. Default: 1.
"""
def __init__(self, num_last_epochs=15, interval=1):
self.interval = interval
self.num_last_epochs = num_last_epochs
def before_train_epoch(self, runner):
epoch = runner.epoch
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
# Synchronize norm every epoch.
self.interval = 1
def after_train_epoch(self, runner):
"""Synchronizing norm."""
epoch = runner.epoch
module = runner.model
if (epoch + 1) % self.interval == 0:
_, world_size = get_dist_info()
if world_size == 1:
return
norm_states = get_norm_states(module)
norm_states = all_reduce_dict(norm_states, op='mean')
module.load_state_dict(norm_states, strict=False)
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
msg = (
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
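# --- Editor's illustrative usage sketch (not part of the original file) ---
# Driving ``parse_ai_message_to_tool_action`` with a message that already
# carries structured tool_calls; assumes a langchain_core version where
# ``tool_calls`` accepts a list of {"name", "args", "id"} dicts.
_demo_msg = AIMessage(
    content="",
    tool_calls=[{"name": "search", "args": {"query": "weather"}, "id": "call_1"}],
)
_demo_actions = parse_ai_message_to_tool_action(_demo_msg)
assert _demo_actions[0].tool == "search"
assert _demo_actions[0].tool_input == {"query": "weather"}
assert _demo_actions[0].tool_call_id == "call_1"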
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
raise TypeError(f"Expected an AI message got {type(message)}")
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"])
)
except JSONDecodeError:
raise OutputParserException(
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
)
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
raise ValueError("Can only parse messages")
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg),
mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
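# --- Editor's illustrative note (not part of the original config) ---
# What the scheduler above means for a hypothetical base LR of 0.02: linear
# warmup from 0.02 * 0.001 over the first 500 iterations, then x0.1 step
# decays at epochs 16 and 22 of the 24-epoch run.
_base_lr = 0.02  # hypothetical optimizer LR, not defined in this config
assert abs(_base_lr * 0.001 - 2e-05) < 1e-12  # warmup start
assert abs(_base_lr * 0.1 - 2e-03) < 1e-12    # after the epoch-16 milestone
assert abs(_base_lr * 0.01 - 2e-04) < 1e-12   # after the epoch-22 milestone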
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://jhu/resnet50_gn_ws')),
neck=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg),
mask_head=dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_torch
from transformers.utils import ContextManagers, find_labels, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.
PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
# Dummy contexts to test `ContextManagers`
@contextlib.contextmanager
def context_en():
print("Welcome!")
yield
print("Bye!")
@contextlib.contextmanager
def context_fr():
print("Bonjour!")
yield
print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
def test_module_spec_available(self):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_no_context(self, mock_stdout):
with ContextManagers([]):
print("Transformers are awesome!")
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_one_context(self, mock_stdout):
with ContextManagers([context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_two_context(self, mock_stdout):
with ContextManagers([context_fr(), context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")
@require_torch
def test_find_labels_pt(self):
self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])
# find_labels works regardless of the class name (it detects the framework through inheritance)
class DummyModel(BertForSequenceClassification):
pass
self.assertEqual(find_labels(DummyModel), ["labels"])
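# --- Editor's illustrative sketch (not part of the original file) ---
# ``ContextManagers`` effectively behaves like ``contextlib.ExitStack``:
# contexts are entered in list order and exited in reverse, which is why the
# two-context test above expects "Bonjour!" before "Welcome!" and "Bye!"
# before "Au revoir!".
from contextlib import ExitStack

with ExitStack() as _stack:
    for _cm in [context_fr(), context_en()]:
        _stack.enter_context(_cm)
    print("Transformers are awesome!")
# -> Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!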
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.
PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
# Dummy contexts to test `ContextManagers`
@contextlib.contextmanager
def context_en():
print("Welcome!")
yield
print("Bye!")
@contextlib.contextmanager
def context_fr():
print("Bonjour!")
yield
print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
def test_module_spec_available(self):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_no_context(self, mock_stdout):
with ContextManagers([]):
print("Transformers are awesome!")
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_one_context(self, mock_stdout):
with ContextManagers([context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")
@unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
def test_context_managers_two_context(self, mock_stdout):
with ContextManagers([context_fr(), context_en()]):
print("Transformers are awesome!")
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")
@require_torch
def test_find_labels_pt(self):
self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])
# find_labels works regardless of the class name (it detects the framework through inheritance)
class DummyModel(BertForSequenceClassification):
pass
self.assertEqual(find_labels(DummyModel), ["labels"])
@require_flax
def test_find_labels_flax(self):
# Flax models don't have labels
self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
self.assertEqual(find_labels(FlaxBertForPreTraining), [])
self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])
# find_labels works regardless of the class name (it detects the framework through inheritance)
class DummyModel(FlaxBertForSequenceClassification):
pass
self.assertEqual(find_labels(DummyModel), [])
|
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor
import copy
from docarray import BaseDoc
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDoc):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(BaseDoc):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
def test_torchtensor_deepcopy():
# Setup
original_tensor_float = TorchTensor(torch.rand(10))
original_tensor_int = TorchTensor(torch.randint(0, 100, (10,)))
# Exercise
copied_tensor_float = copy.deepcopy(original_tensor_float)
copied_tensor_int = copy.deepcopy(original_tensor_int)
# Verify
assert torch.equal(original_tensor_float, copied_tensor_float)
assert original_tensor_float.data_ptr() != copied_tensor_float.data_ptr()
assert torch.equal(original_tensor_int, copied_tensor_int)
assert original_tensor_int.data_ptr() != copied_tensor_int.data_ptr()
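# --- Editor's illustrative sketch (not part of the original file) ---
# Why the ``data_ptr()`` comparison above is the stronger check: ``is not``
# only proves two distinct Python objects, while differing ``data_ptr()``
# values prove the copy owns separate underlying storage.
_orig = torch.rand(10)
_copy = copy.deepcopy(_orig)
assert _copy is not _orig                    # different Python objects
assert _copy.data_ptr() != _orig.data_ptr()  # and different memory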
|
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor
import copy
from docarray import BaseDoc
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDoc):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(BaseDoc):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
def test_torchtensor_deepcopy():
# Setup
original_tensor_float = TorchTensor(torch.rand(10))
original_tensor_int = TorchTensor(torch.randint(0, 100, (10,)))
# Exercise
copied_tensor_float = copy.deepcopy(original_tensor_float)
copied_tensor_int = copy.deepcopy(original_tensor_int)
# Verify
assert torch.equal(original_tensor_float, copied_tensor_float)
assert original_tensor_float is not copied_tensor_float
assert torch.equal(original_tensor_int, copied_tensor_int)
assert original_tensor_int is not copied_tensor_int
|
from keras.src import ops
from keras.src import quantizers
from keras.src import random
from keras.src import testing
class QuantizersTest(testing.TestCase):
def test_get_method(self):
quantizer = quantizers.get("abs_max_quantizer", axis=-1)
        self.assertIsInstance(quantizer, quantizers.AbsMaxQuantizer)
        quantizer = quantizers.get(None)
        self.assertIsNone(quantizer)
with self.assertRaises(ValueError):
quantizers.get("typo")
def test_abs_max_quantizer(self):
values = random.uniform([3, 4, 5], minval=-1, maxval=1, dtype="float32")
quantizer = quantizers.AbsMaxQuantizer(axis=-1)
# Test quantizing
quantized_values, scale = quantizer(values)
self.assertDType(quantized_values, "int8")
self.assertDType(scale, "float32")
self.assertEqual(tuple(quantized_values.shape), (3, 4, 5))
self.assertEqual(tuple(scale.shape), (3, 4, 1))
self.assertLessEqual(ops.max(quantized_values), 127)
self.assertGreaterEqual(ops.min(quantized_values), -127)
# Test dequantizing
dequantized_values = ops.divide(quantized_values, scale)
rmse = ops.sqrt(
ops.mean(ops.square(ops.subtract(values, dequantized_values)))
)
self.assertLess(rmse, 1e-1) # loose assertion
# Test serialization
self.run_class_serialization_test(quantizer)
# Test bfloat16 & float16 dtype
values = random.uniform(
[3, 4, 5], minval=-1, maxval=1, dtype="bfloat16"
)
quantized_values, scale = quantizer(values)
self.assertDType(quantized_values, "int8")
self.assertDType(scale, "bfloat16")
values = random.uniform([3, 4, 5], minval=-1, maxval=1, dtype="float16")
quantized_values, scale = quantizer(values)
self.assertDType(quantized_values, "int8")
self.assertDType(scale, "float16")
def test_abs_max_quantizer_to_numpy(self):
values = random.uniform([3, 4, 5], minval=-1, maxval=1, dtype="float32")
quantized_values, scale = quantizers.abs_max_quantize(
values, axis=-1, to_numpy=True
)
ref_quantized_values, ref_scale = quantizers.abs_max_quantize(
values, axis=-1
)
self.assertAllClose(quantized_values, ref_quantized_values)
self.assertAllClose(scale, ref_scale)
def test_compute_float8_scale(self):
amax = 3.0
scale = 4.0
dtype_max = 448.0 # float8_e4m3fn
# The algorithm for computing the new scale is sourced from
# https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/jax.html#transformer_engine.jax.update_fp8_metas
expected_scale = 1.0 / (dtype_max / amax) / (2**0)
computed_scale = quantizers.compute_float8_scale(amax, scale, dtype_max)
self.assertAllClose(computed_scale, expected_scale)
def test_compute_float8_amax_history(self):
values = random.uniform([3, 4, 5], minval=-1, maxval=1)
amax_history = random.uniform([123])
amax_from_values = ops.max(ops.abs(values))
computed_amax_history = quantizers.compute_float8_amax_history(
values, amax_history
)
self.assertAllClose(computed_amax_history[0], amax_from_values)
# Shift to left with 1 step
self.assertAllClose(
computed_amax_history[1:], ops.roll(amax_history, -1)[1:]
)
def test_quantize_and_dequantize(self):
scale = 1.0 / 100.0
values = random.uniform([3, 4, 5], minval=-1, maxval=1)
qdq_values = quantizers.quantize_and_dequantize(
values, scale, "float8_e4m3fn", "float32"
)
# A loose assertion due to an expected quantization error
self.assertAllClose(qdq_values, values, atol=1e-1)
qdq_values = quantizers.quantize_and_dequantize(
values, scale, "float8_e5m2", "float32"
)
# A loose assertion due to an expected quantization error
self.assertAllClose(qdq_values, values, atol=5e-1)
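# --- Editor's illustrative sketch (not part of the original file) ---
# The abs-max scheme exercised above, worked by hand with numpy: map the
# largest magnitude to 127, round to int8, and dequantize by dividing by the
# same scale (matching the ``ops.divide`` call in the test).
import numpy as np

_vals = np.array([0.5, -1.0, 0.25], dtype="float32")
_scale = 127.0 / np.max(np.abs(_vals))            # 127 / 1.0
_quant = np.round(_vals * _scale).astype("int8")  # [64, -127, 32]
_dequant = _quant / _scale
assert np.allclose(_dequant, _vals, atol=1e-2)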
|
from keras.src import ops
from keras.src import quantizers
from keras.src import random
from keras.src import testing
class QuantizersTest(testing.TestCase):
def test_get_method(self):
quantizer = quantizers.get("abs_max_quantizer", axis=-1)
        self.assertIsInstance(quantizer, quantizers.AbsMaxQuantizer)
        quantizer = quantizers.get(None)
        self.assertIsNone(quantizer)
with self.assertRaises(ValueError):
quantizers.get("typo")
def test_abs_max_quantizer(self):
values = random.uniform([3, 4, 5], minval=-1, maxval=1)
quantizer = quantizers.AbsMaxQuantizer(axis=-1)
# Test quantizing
quantized_values, scale = quantizer(values)
self.assertEqual(tuple(quantized_values.shape), (3, 4, 5))
self.assertEqual(tuple(scale.shape), (3, 4, 1))
self.assertLessEqual(ops.max(quantized_values), 127)
self.assertGreaterEqual(ops.min(quantized_values), -127)
# Test dequantizing
dequantized_values = ops.divide(quantized_values, scale)
rmse = ops.sqrt(
ops.mean(ops.square(ops.subtract(values, dequantized_values)))
)
self.assertLess(rmse, 1e-1) # loose assertion
# Test serialization
self.run_class_serialization_test(quantizer)
def test_compute_float8_scale(self):
amax = 3.0
scale = 4.0
dtype_max = 448.0 # float8_e4m3fn
# The algorithm for computing the new scale is sourced from
# https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/jax.html#transformer_engine.jax.update_fp8_metas
expected_scale = 1.0 / (dtype_max / amax) / (2**0)
computed_scale = quantizers.compute_float8_scale(amax, scale, dtype_max)
self.assertAllClose(computed_scale, expected_scale)
def test_compute_float8_amax_history(self):
values = random.uniform([3, 4, 5], minval=-1, maxval=1)
amax_history = random.uniform([123])
amax_from_values = ops.max(ops.abs(values))
computed_amax_history = quantizers.compute_float8_amax_history(
values, amax_history
)
self.assertAllClose(computed_amax_history[0], amax_from_values)
# Shift to left with 1 step
self.assertAllClose(
computed_amax_history[1:], ops.roll(amax_history, -1)[1:]
)
def test_quantize_and_dequantize(self):
scale = 1.0 / 100.0
values = random.uniform([3, 4, 5], minval=-1, maxval=1)
qdq_values = quantizers.quantize_and_dequantize(
values, scale, "float8_e4m3fn", "float32"
)
# A loose assertion due to an expected quantization error
self.assertAllClose(qdq_values, values, atol=1e-1)
qdq_values = quantizers.quantize_and_dequantize(
values, scale, "float8_e5m2", "float32"
)
# A loose assertion due to an expected quantization error
self.assertAllClose(qdq_values, values, atol=5e-1)
|
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json():
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocArray[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json():
da = DocumentArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocumentArray[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.data import InstanceData
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (5, ))
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample('image', image, gt_det_data_sample)
# test out_file
det_local_visualizer.add_datasample(
'image', image, gt_det_data_sample, out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_gt=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# TODO: test gt_panoptic_seg
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
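# --- Editor's illustrative sketch (not part of the original file) ---
# The box algebra inside ``_rand_bboxes`` above for one concrete box: a
# normalized center (cx, cy) and size (bw, bh) become clipped corner
# coordinates in image space.
_cx, _cy, _bw, _bh, _w, _h = 0.5, 0.5, 0.5, 0.5, 10, 12
_tl_x = min(max(_cx * _w - _w * _bw / 2, 0), _w)  # 2.5
_tl_y = min(max(_cy * _h - _h * _bh / 2, 0), _h)  # 3.0
_br_x = min(max(_cx * _w + _w * _bw / 2, 0), _w)  # 7.5
_br_y = min(max(_cy * _h + _h * _bh / 2, 0), _h)  # 9.0
assert (_tl_x, _tl_y, _br_x, _br_y) == (2.5, 3.0, 7.5, 9.0)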
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.data import InstanceData
from mmdet.data_elements import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = torch.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (5, ))
gt_det_data_sample = DetDataSample()
gt_det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample('image', image, gt_det_data_sample)
# test out_file
det_local_visualizer.add_datasample(
'image', image, gt_det_data_sample, out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (5, ))
pred_instances.scores = torch.rand((5, ))
pred_det_data_sample = DetDataSample()
pred_det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_gt=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
gt_det_data_sample,
pred_det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# TODO: test gt_panoptic_seg
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
import os
import numpy as np
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare_to(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare_to(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.resave_weights(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
|
import os
import numpy as np
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
editor._weights_summary_interactive()
target_model = get_target_model()
out = editor.compare_to(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare_to(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.resave_weights(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[32, 44])
runner = dict(type='EpochBasedRunner', max_epochs=48)
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .runner import *
from .utils import *
from .visualization import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .runner import *
from .utils import *
|
import pytest
from jina import Document, DocumentArray, Flow
from ...text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def flow():
return Flow().add(uses=TextPaddleEncoder)
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
def validate_callback(mock, validate_func):
for args, kwargs in mock.call_args_list:
validate_func(*args, **kwargs)
mock.assert_called()
@pytest.mark.parametrize(
'parameters',
[
{'traverse_paths': ['r'], 'batch_size': 10},
{'traverse_paths': ['m'], 'batch_size': 10},
{'traverse_paths': ['r', 'c'], 'batch_size': 5},
],
)
def test_text_paddle(flow, content, document_array, parameters, mocker):
def validate(resp):
for doc in resp.docs:
assert doc.embedding.shape == (1024,)
assert doc.embedding.all()
mock_on_done = mocker.Mock()
with flow as f:
f.index(inputs=document_array, on_done=mock_on_done)
validate_callback(mock_on_done, validate)
|
import pytest
from jina import Document, DocumentArray, Flow
from jinahub.encoder.text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def flow():
return Flow().add(uses=TextPaddleEncoder)
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
def validate_callback(mock, validate_func):
for args, kwargs in mock.call_args_list:
validate_func(*args, **kwargs)
mock.assert_called()
@pytest.mark.parametrize(
'parameters',
[
{'traverse_paths': ['r'], 'batch_size': 10},
{'traverse_paths': ['m'], 'batch_size': 10},
{'traverse_paths': ['r', 'c'], 'batch_size': 5},
],
)
def test_text_paddle(flow, content, document_array, parameters, mocker):
def validate(resp):
for doc in resp.docs:
assert doc.embedding.shape == (1024,)
assert doc.embedding.all()
mock_on_done = mocker.Mock()
with flow as f:
f.index(inputs=document_array, on_done=mock_on_done)
validate_callback(mock_on_done, validate)
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.autograd import gradcheck
from mmdet.models.utils import interpolate_as, sigmoid_geometric_mean
def test_interpolate_as():
source = torch.rand((1, 5, 4, 4))
target = torch.rand((1, 1, 16, 16))
# Test 4D source and target
result = interpolate_as(source, target)
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D target
result = interpolate_as(source, target.squeeze(0))
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D source
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
# Test type(target) == np.ndarray
target = np.random.rand(16, 16)
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
def test_sigmoid_geometric_mean():
x = torch.randn(20, 20, dtype=torch.double, requires_grad=True)
y = torch.randn(20, 20, dtype=torch.double, requires_grad=True)
inputs = (x, y)
test = gradcheck(sigmoid_geometric_mean, inputs, eps=1e-6, atol=1e-4)
assert test
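# --- Editor's illustrative sketch (not part of the original file) ---
# The 4D case above expressed with plain torch: ``interpolate_as`` resizes
# ``source`` to the spatial size of ``target``, which for 4D inputs roughly
# corresponds to ``F.interpolate`` on the target's trailing dimensions.
import torch.nn.functional as F

_src = torch.rand(1, 5, 4, 4)
_tgt = torch.rand(1, 1, 16, 16)
_out = F.interpolate(
    _src, size=_tgt.shape[-2:], mode='bilinear', align_corners=False)
assert _out.shape == (1, 5, 16, 16)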
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.models.utils import interpolate_as
def test_interpolate_as():
source = torch.rand((1, 5, 4, 4))
target = torch.rand((1, 1, 16, 16))
# Test 4D source and target
result = interpolate_as(source, target)
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D target
result = interpolate_as(source, target.squeeze(0))
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D source
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
# Test type(target) == np.ndarray
target = np.random.rand(16, 16)
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
|
"""Scrapfly Web Reader."""
import logging
from typing import List, Optional, Literal
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
logger = logging.getLogger(__file__)
class ScrapflyReader(BasePydanticReader):
"""
    Turn a URL into LLM-accessible markdown with `Scrapfly.io`.
Args:
api_key: The Scrapfly API key.
scrape_config: The Scrapfly ScrapeConfig object.
ignore_scrape_failures: Whether to continue on failures.
urls: List of urls to scrape.
scrape_format: Scrape result format (markdown or text)
For further details, visit: https://scrapfly.io/docs/sdk/python
"""
api_key: str
ignore_scrape_failures: bool = True
scrapfly: Optional["ScrapflyClient"] = None # Declare the scrapfly attribute
def __init__(self, api_key: str, ignore_scrape_failures: bool = True) -> None:
"""Initialize client."""
super().__init__(api_key=api_key, ignore_scrape_failures=ignore_scrape_failures)
try:
from scrapfly import ScrapflyClient
except ImportError:
raise ImportError(
"`scrapfly` package not found, please run `pip install scrapfly-sdk`"
)
self.scrapfly = ScrapflyClient(key=api_key)
@classmethod
def class_name(cls) -> str:
return "Scrapfly_reader"
def load_data(
self,
urls: List[str],
scrape_format: Literal["markdown", "text"] = "markdown",
scrape_config: Optional[dict] = None,
) -> List[Document]:
"""
Load data from the urls.
Args:
            urls (List[str]): List of URLs to scrape.
            scrape_config (Optional[dict]): Dictionary of Scrapfly scrape config options.
Returns:
List[Document]: List of documents.
Raises:
ValueError: If URLs aren't provided.
"""
from scrapfly import ScrapeApiResponse, ScrapeConfig
if urls is None:
raise ValueError("URLs must be provided.")
scrape_config = scrape_config if scrape_config is not None else {}
documents = []
for url in urls:
try:
response: ScrapeApiResponse = self.scrapfly.scrape(
ScrapeConfig(url, format=scrape_format, **scrape_config)
)
documents.append(
Document(
text=response.scrape_result["content"], extra_info={"url": url}
)
)
except Exception as e:
if self.ignore_scrape_failures:
logger.error(f"Error fetching data from {url}, exception: {e}")
else:
raise e # noqa: TRY201
return documents
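# --- Editor's illustrative usage sketch (not part of the original file) ---
# How the reader above is typically driven. The API key is a placeholder, and
# the ``render_js`` option is an assumption based on the Scrapfly SDK docs.
if __name__ == "__main__":
    reader = ScrapflyReader(api_key="scp-live-...", ignore_scrape_failures=True)
    documents = reader.load_data(
        urls=["https://example.com"],
        scrape_format="markdown",
        scrape_config={"render_js": True},
    )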
|
"""Scrapfly Web Reader."""
import logging
from typing import List, Optional, Literal
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
logger = logging.getLogger(__file__)
class ScrapflyReader(BasePydanticReader):
"""Turn a url to llm accessible markdown with `Scrapfly.io`.
Args:
api_key: The Scrapfly API key.
scrape_config: The Scrapfly ScrapeConfig object.
ignore_scrape_failures: Whether to continue on failures.
urls: List of urls to scrape.
scrape_format: Scrape result format (markdown or text)
For further details, visit: https://scrapfly.io/docs/sdk/python
"""
api_key: str
ignore_scrape_failures: bool = True
scrapfly: Optional["ScrapflyClient"] = None # Declare the scrapfly attribute
def __init__(self, api_key: str, ignore_scrape_failures: bool = True) -> None:
"""Initialize client."""
super().__init__(api_key=api_key, ignore_scrape_failures=ignore_scrape_failures)
try:
from scrapfly import ScrapflyClient
except ImportError:
raise ImportError(
"`scrapfly` package not found, please run `pip install scrapfly-sdk`"
)
self.scrapfly = ScrapflyClient(key=api_key)
@classmethod
def class_name(cls) -> str:
return "Scrapfly_reader"
def load_data(
self,
urls: List[str],
scrape_format: Literal["markdown", "text"] = "markdown",
scrape_config: Optional[dict] = None,
) -> List[Document]:
"""Load data from the urls.
Args:
            urls (List[str]): List of URLs to scrape.
            scrape_config (Optional[dict]): Dictionary of Scrapfly scrape config options.
Returns:
List[Document]: List of documents.
Raises:
ValueError: If URLs aren't provided.
"""
from scrapfly import ScrapeApiResponse, ScrapeConfig
if urls is None:
raise ValueError("URLs must be provided.")
scrape_config = scrape_config if scrape_config is not None else {}
documents = []
for url in urls:
try:
response: ScrapeApiResponse = self.scrapfly.scrape(
ScrapeConfig(url, format=scrape_format, **scrape_config)
)
documents.append(
Document(
text=response.scrape_result["content"], extra_info={"url": url}
)
)
except Exception as e:
if self.ignore_scrape_failures:
logger.error(f"Error fetching data from {url}, exception: {e}")
else:
raise e # noqa: TRY201
return documents
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.proto import NdArrayProto
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(Generic[ShapeT], AbstractType, ABC):
__parametrized_meta__ = type
_PROTO_FIELD_NAME: str
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form AnyTensor[shape].
        It is called when a tensor is assigned to a field of this type,
i.e. when a tensor is passed to a Document field of type AnyTensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form AnyTensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
Raises `ValueError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
comp_backend = cls.get_comp_backend()
# at runtime, 'T' is always the correct input type for .stack()
# but mypy doesn't know that, so we ignore it here
return cls.__docarray_from_native__(comp_backend.stack(seq)) # type: ignore
@classmethod
@abc.abstractmethod
def __docarray_from_native__(cls: Type[T], value: Any) -> T:
"""
Create a DocArray tensor from a tensor that is native to the given framework,
e.g. from numpy.ndarray or torch.Tensor.
"""
...
@staticmethod
@abc.abstractmethod
def get_comp_backend() -> Type[AbstractComputationalBackend]:
"""The computational backend compatible with this tensor type."""
...
@abc.abstractmethod
def __getitem__(self, item):
"""Get a slice of this tensor."""
...
def __iter__(self):
"""Iterate over the elements of this tensor."""
...
@abc.abstractmethod
def to_protobuf(self) -> 'NdArrayProto':
"""Convert DocumentArray into a Protobuf message"""
...
def unwrap(self):
"""Return the native tensor object that this DocArray tensor wraps."""
...
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
_PROTO_FIELD_NAME: str
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form AnyTensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type AnyTensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form AnyTensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
comp_backend = cls.get_comp_backend()
# at runtime, 'T' is always the correct input type for .stack()
# but mypy doesn't know that, so we ignore it here
return cls.__docarray_from_native__(comp_backend.stack(seq)) # type: ignore
@classmethod
@abc.abstractmethod
def __docarray_from_native__(cls: Type[T], value: Any) -> T:
"""
Create a DocArray tensor from a tensor that is native to the given framework,
e.g. from numpy.ndarray or torch.Tensor.
"""
...
@staticmethod
@abc.abstractmethod
def get_comp_backend() -> Type[AbstractComputationalBackend]:
"""The computational backend compatible with this tensor type."""
...
|
from functools import partial
from torchaudio.models import hdemucs_high
from torchaudio.pipelines import SourceSeparationBundle
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation. The underlying model is constructed by
:py:func:`torchaudio.models.hdemucs_high` and utilizes weights trained on MUSDB-HQ [:footcite:`MUSDB18HQ`]
and internal extra training data, all at the same sample rate of 44.1 kHz. The model separates mixture music into
“drums”, “bass”, “vocals”, and “other” sources. Training was performed in the original HDemucs repository
`here <https://github.com/facebookresearch/demucs/>`__.
"""
HDEMUCS_HIGH_MUSDB = SourceSeparationBundle(
_model_path="models/hdemucs_high_musdbhq_only.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation. The underlying model is constructed by
:py:func:`torchaudio.models.hdemucs_high` and utilizes weights trained on only
MUSDB-HQ [:footcite:`MUSDB18HQ`] at the same sample rate of 44.1 kHz. The model separates mixture music into
“drums”, “bass”, “vocals”, and “other” sources. Training was performed in the original HDemucs repository
`here <https://github.com/facebookresearch/demucs/>`__.
"""
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import hdemucs_high
from torchaudio.prototype.models import conv_tasnet_base
@dataclass
class SourceSeparationBundle:
"""torchaudio.prototype.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained *ConvTasNet* [:footcite:`Luo_2019`] pipeline for source separation.
The underlying model is constructed by :py:func:`torchaudio.prototype.models.conv_tasnet_base`
and utilizes weights trained on *Libri2Mix dataset* [:footcite:`cosentino2020librimix`] using training script
``lightning_train.py`` `here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation. The underlying model is constructed by
:py:func:`torchaudio.models.hdemucs_high` and utilizes weights trained on MUSDB-HQ [:footcite:`MUSDB18HQ`]
and internal extra training data, all at the same sample rate of 44.1 kHz. The model separates mixture music into
“drums”, “bass”, “vocals”, and “other” sources. Training was performed in the original HDemucs repository
`here <https://github.com/facebookresearch/demucs/>`__.
"""
HDEMUCS_HIGH_MUSDB = SourceSeparationBundle(
_model_path="models/hdemucs_high_musdbhq_only.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation. The underlying model is constructed by
:py:func:`torchaudio.models.hdemucs_high` and utilizes weights trained on only
MUSDB-HQ [:footcite:`MUSDB18HQ`] at the same sample rate of 44.1 kHz. The model separates mixture music into
“drums”, “bass”, “vocals”, and “other” sources. Training was performed in the original HDemucs repository
`here <https://github.com/facebookresearch/demucs/>`__.
"""
|
from abc import abstractmethod
from typing import Protocol
# NOTE: This is a bare-bones suggestion for an abstract protocol to define GraphRAG for llama-index.
# It should be expanded upon and integrated into llama-index-core to support multiple different GraphRAG
# libraries in the future.
class GraphRAG(Protocol):
"""
Abstract graph RAG protocol.
    This protocol defines the interface for a GraphRAG implementation, which is
    responsible for adding, storing, processing and retrieving information from knowledge graphs.
    Attributes:
        llm_api_key: str: API key for the desired LLM.
graph_db_provider: str: The graph database provider.
vector_db_provider: str: The vector database provider.
relational_db_provider: str: The relational database provider.
db_name: str: The name of the databases.
"""
@abstractmethod
async def add(self, data, dataset_name):
"""
Add data to the specified dataset.
This data will later be processed and made into a knowledge graph.
Args:
data (Any): The data to be added to the graph.
dataset_name (str): Name of the dataset or node set where the data will be added.
"""
@abstractmethod
async def process_data(self, dataset_name: str):
"""
Process and structure data in the dataset and make a knowledge graph out of it.
Args:
dataset_name (str): The dataset name to process.
"""
@abstractmethod
async def search(self, query: str):
"""
Search the graph for relevant information based on a query.
Args:
query (str): The query string to match against data from the graph.
"""
@abstractmethod
async def get_related_nodes(self, node_id: str):
"""
Search the graph for relevant nodes or relationships based on node id.
Args:
node_id (str): The name of the node to match against nodes in the graph.
"""
|
from abc import abstractmethod
from typing import Protocol
# NOTE: This is a bare-bones suggestion for an abstract protocol to define GraphRAG for llama-index.
# It should be expanded upon and integrated into llama-index-core to support multiple different GraphRAG
# libraries in the future.
class GraphRAG(Protocol):
"""Abstract graph RAG protocol.
    This protocol defines the interface for a GraphRAG implementation, which is
    responsible for adding, storing, processing and retrieving information from knowledge graphs.
    Attributes:
        llm_api_key: str: API key for the desired LLM.
graph_db_provider: str: The graph database provider.
vector_db_provider: str: The vector database provider.
relational_db_provider: str: The relational database provider.
db_name: str: The name of the databases.
"""
@abstractmethod
async def add(self, data, dataset_name):
"""Add data to the specified dataset.
This data will later be processed and made into a knowledge graph.
Args:
data (Any): The data to be added to the graph.
dataset_name (str): Name of the dataset or node set where the data will be added.
"""
@abstractmethod
async def process_data(self, dataset_name: str):
"""Process and structure data in the dataset and make a knowledge graph out of it.
Args:
dataset_name (str): The dataset name to process.
"""
@abstractmethod
async def search(self, query: str):
"""Search the graph for relevant information based on a query.
Args:
query (str): The query string to match against data from the graph.
"""
@abstractmethod
async def get_related_nodes(self, node_id: str):
"""Search the graph for relevant nodes or relationships based on node id.
Args:
node_id (str): The name of the node to match against nodes in the graph.
"""
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import librosa
import numpy as np
from jina import Document, DocumentArray, Executor
from ...audio_clip_encoder import AudioCLIPEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_path.endswith('AudioCLIP-Full-Training.pt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
docs = DocumentArray([Document(blob=x_audio)])
model = AudioCLIPEncoder()
model.encode(docs, parameters={})
assert docs[0].embedding.shape == (1024,)
def test_many_documents():
audio1, _ = librosa.load(Path(__file__).parents[1] / 'test_data/sample.mp3')
audio2, _ = librosa.load(Path(__file__).parents[1] / 'test_data/sample.wav')
docs = DocumentArray([Document(blob=audio1), Document(blob=audio2)])
encoder = AudioCLIPEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding.shape == (1024,)
assert docs[1].embedding.shape == (1024,)
def test_traversal_paths():
audio1, _ = librosa.load(Path(__file__).parents[1] / 'test_data/sample.mp3')
audio2, _ = librosa.load(Path(__file__).parents[1] / 'test_data/sample.wav')
audio1_chunks = np.split(audio1, 4)
audio2_chunks = np.split(audio2, 2)
docs = DocumentArray(
[
Document(
id='root1',
blob=audio1,
chunks=[
Document(id=f'chunk1{i}', blob=chunk)
for i, chunk in enumerate(audio1_chunks)
],
),
Document(
id='root2',
blob=audio2,
chunks=[
Document(id='chunk21', blob=audio2_chunks[0]),
Document(
id='chunk22',
blob=audio2_chunks[1],
chunks=[
Document(id=f'chunk22{i}', blob=chunk)
for i, chunk in enumerate(np.split(audio2_chunks[1], 3))
],
),
],
),
]
)
encoder = AudioCLIPEncoder(default_traversal_paths=['c'])
encoder.encode(docs, parameters={})
encoder.encode(docs, parameters={'traversal_paths': ['cc']})
for path, count in [['r', 0], ['c', 6], ['cc', 3]]:
embeddings = (
DocumentArray(docs).traverse_flat([path]).get_attributes('embedding')
)
assert all(embedding.shape == (1024,) for embedding in embeddings)
assert len(embeddings) == count
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import librosa
import numpy as np
from jina import Executor, Document, DocumentArray
from audio_clip_encoder import AudioCLIPEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
encoder = Executor.load_config(os.path.join(cur_dir, '../../config.yml'))
assert encoder.model_path.endswith('AudioCLIP-Full-Training.pt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(os.path.join(cur_dir, '../test_data/sample.wav'))
docs = DocumentArray([Document(blob=x_audio)])
model = AudioCLIPEncoder()
model.encode(docs, parameters={})
assert docs[0].embedding.shape == (1024, )
def test_many_documents():
audio1, _ = librosa.load(os.path.join(cur_dir, '../test_data/sample.mp3'))
audio2, _ = librosa.load(os.path.join(cur_dir, '../test_data/sample.wav'))
docs = DocumentArray([Document(blob=audio1), Document(blob=audio2)])
encoder = AudioCLIPEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding.shape == (1024, )
assert docs[1].embedding.shape == (1024, )
def test_traversal_paths():
audio1, _ = librosa.load(os.path.join(cur_dir, '../test_data/sample.mp3'))
audio2, _ = librosa.load(os.path.join(cur_dir, '../test_data/sample.wav'))
audio1_chunks = np.split(audio1, 4)
audio2_chunks = np.split(audio2, 2)
docs = DocumentArray([
Document(
id='root1',
blob=audio1,
chunks=[
Document(id=f'chunk1{i}', blob=chunk) for i, chunk in enumerate(audio1_chunks)
]
),
Document(
id='root2',
blob=audio2,
chunks=[
Document(id='chunk21', blob=audio2_chunks[0]),
Document(id='chunk22', blob=audio2_chunks[1], chunks=[
Document(id=f'chunk22{i}', blob=chunk) for i, chunk in enumerate(np.split(audio2_chunks[1], 3))
])
]
)
])
encoder = AudioCLIPEncoder(default_traversal_paths=['c'])
encoder.encode(docs, parameters={})
encoder.encode(docs, parameters={'traversal_paths': ['cc']})
for path, count in [['r', 0], ['c', 6], ['cc', 3]]:
embeddings = DocumentArray(docs).traverse_flat([path]).get_attributes('embedding')
assert all(
embedding.shape == (1024,)
for embedding in embeddings
)
assert len(embeddings) == count
|
from pathlib import Path
import librosa
import pytest
from executor.vggish import vggish_input
from executor.vggish_audio_encoder import VggishAudioEncoder
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert str(ex.vgg_model_path).endswith('vggish_model.ckpt')
assert str(ex.pca_model_path).endswith('vggish_pca_params.ckpt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder()
model.encode(doc, parameters={})
assert doc[0].embedding.shape == (128,)
@pytest.mark.gpu
def test_embedding_dimension_gpu():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder(device='/GPU:0')
model.encode(doc, parameters={})
assert doc[0].embedding.shape == (128,)
|
from pathlib import Path
import librosa
import pytest
from jina import Document, DocumentArray, Executor
from tensorflow.python.framework import ops
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert str(ex.vgg_model_path).endswith('vggish_model.ckpt')
assert str(ex.pca_model_path).endswith('vggish_pca_params.ckpt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder()
model.encode(doc, parameters={})
assert doc[0].embedding.shape == (128,)
@pytest.mark.gpu
def test_embedding_dimension_gpu():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder(device='/GPU:0')
model.encode(doc, parameters={})
assert doc[0].embedding.shape == (128,)
|
from abc import ABC
from contextlib import ExitStack
from rich.table import Table
from jina.helper import CatchAllCleanupContextManager, get_internal_ip, get_public_ip
class BaseOrchestrator(ExitStack, ABC):
"""Base orchestrator class"""
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
def _init_table(self):
table = Table(
title=None, box=None, highlight=True, show_header=False, min_width=40
)
table.add_column('', justify='left')
table.add_column('', justify='right')
table.add_column('', justify='right')
return table
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
if getattr(self, '_internal_ip', None):
return self._internal_ip
else:
self._internal_ip = get_internal_ip()
return self._internal_ip
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
if getattr(self, '_public_ip', None):
return self._public_ip
else:
self._public_ip = get_public_ip()
return self._public_ip
@property
def _entity_id(self) -> str:
import uuid
if hasattr(self, '_entity_id_'):
return self._entity_id_
self._entity_id_ = uuid.uuid1().hex
return self._entity_id_
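# --- Usage sketch --------------------------------------------------------------
# Minimal subclass showing the context-manager flow; `start` is the hook a
# concrete orchestrator provides, everything else is inherited. For
# illustration only.
if __name__ == '__main__':

    class DummyOrchestrator(BaseOrchestrator):
        def start(self):
            print(f'started {self._entity_id} on {self.address_private}')
            return self

    with DummyOrchestrator():
        pass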
|
from abc import ABC
from contextlib import ExitStack
from rich.table import Table
from jina.helper import CatchAllCleanupContextManager, get_internal_ip, get_public_ip
class BaseOrchestrator(ExitStack, ABC):
"""Base orchestrator class"""
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
def _init_table(self):
table = Table(
title=None, box=None, highlight=True, show_header=False, min_width=40
)
table.add_column('', justify='left')
table.add_column('', justify='right')
table.add_column('', justify='right')
return table
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
if getattr(self, '_internal_ip', None):
return self._internal_ip
else:
self._internal_ip = get_internal_ip()
return self._internal_ip
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
if getattr(self, '_public_ip', None):
return self._public_ip
else:
self._public_ip = get_public_ip()
return self._public_ip
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.testing import demo_track_inputs, random_boxes
from mmdet.utils import register_all_modules
class TestByteTracker(TestCase):
@classmethod
def setUpClass(cls):
register_all_modules(init_default_scope=True)
cfg = dict(
type='ByteTracker',
motion=dict(type='KalmanFilter'),
obj_score_thrs=dict(high=0.6, low=0.1),
init_track_thr=0.7,
weight_iou_with_det_scores=True,
match_iou_thrs=dict(high=0.1, low=0.5, tentative=0.3),
num_tentatives=3,
num_frames_retain=30)
cls.tracker = MODELS.build(cfg)
cls.tracker.kf = TASK_UTILS.build(dict(type='KalmanFilter'))
cls.num_frames_retain = cfg['num_frames_retain']
cls.num_objs = 30
def test_init(self):
bboxes = random_boxes(self.num_objs, 512)
labels = torch.zeros(self.num_objs)
scores = torch.ones(self.num_objs)
ids = torch.arange(self.num_objs)
self.tracker.update(
ids=ids, bboxes=bboxes, scores=scores, labels=labels, frame_ids=0)
assert self.tracker.ids == list(ids)
assert self.tracker.memo_items == [
'ids', 'bboxes', 'scores', 'labels', 'frame_ids'
]
def test_track(self):
with torch.no_grad():
packed_inputs = demo_track_inputs(batch_size=1, num_frames=2)
track_data_sample = packed_inputs['data_samples'][0]
video_len = len(track_data_sample)
for frame_id in range(video_len):
img_data_sample = track_data_sample[frame_id]
img_data_sample.pred_instances = \
img_data_sample.gt_instances.clone()
# add fake scores
scores = torch.ones(len(img_data_sample.gt_instances.bboxes))
img_data_sample.pred_instances.scores = torch.FloatTensor(
scores)
pred_track_instances = self.tracker.track(
data_sample=img_data_sample)
bboxes = pred_track_instances.bboxes
labels = pred_track_instances.labels
assert bboxes.shape[1] == 4
assert bboxes.shape[0] == labels.shape[0]
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.testing import demo_track_inputs, random_boxes
from mmdet.utils import register_all_modules
class TestByteTracker(TestCase):
@classmethod
def setUpClass(cls):
register_all_modules(init_default_scope=True)
cfg = dict(
type='ByteTracker',
motion=dict(type='KalmanFilter'),
obj_score_thrs=dict(high=0.6, low=0.1),
init_track_thr=0.7,
weight_iou_with_det_scores=True,
match_iou_thrs=dict(high=0.1, low=0.5, tentative=0.3),
num_tentatives=3,
num_frames_retain=30)
cls.tracker = MODELS.build(cfg)
cls.tracker.kf = TASK_UTILS.build(dict(type='KalmanFilter'))
cls.num_frames_retain = cfg['num_frames_retain']
cls.num_objs = 30
def test_init(self):
bboxes = random_boxes(self.num_objs, 512)
labels = torch.zeros(self.num_objs)
scores = torch.ones(self.num_objs)
ids = torch.arange(self.num_objs)
self.tracker.update(
ids=ids, bboxes=bboxes, scores=scores, labels=labels, frame_ids=0)
assert self.tracker.ids == list(ids)
assert self.tracker.memo_items == [
'ids', 'bboxes', 'scores', 'labels', 'frame_ids'
]
def test_track(self):
with torch.no_grad():
packed_inputs = demo_track_inputs(batch_size=1, num_frames=2)
track_data_sample = packed_inputs['data_samples'][0]
video_len = len(track_data_sample)
for frame_id in range(video_len):
img_data_sample = track_data_sample[frame_id]
img_data_sample.pred_instances = \
img_data_sample.gt_instances.clone()
# add fake scores
scores = torch.ones(5)
img_data_sample.pred_instances.scores = torch.FloatTensor(
scores)
pred_track_instances = self.tracker.track(
data_sample=img_data_sample)
bboxes = pred_track_instances.bboxes
labels = pred_track_instances.labels
assert bboxes.shape[1] == 4
assert bboxes.shape[0] == labels.shape[0]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from tempfile import TemporaryDirectory
from unittest import TestCase, skipIf
from mmengine.registry import (Registry, count_registered_modules, root,
traverse_registry_tree)
from mmengine.utils import is_installed
class TestUtils(TestCase):
def test_traverse_registry_tree(self):
# Hierarchical Registry
# DOGS
# _______|_______
# | |
# HOUNDS (hound) SAMOYEDS (samoyed)
# _______|_______ |
# | | |
# LITTLE_HOUNDS MID_HOUNDS LITTLE_SAMOYEDS
# (little_hound) (mid_hound) (little_samoyed)
DOGS = Registry('dogs')
HOUNDS = Registry('dogs', parent=DOGS, scope='hound')
LITTLE_HOUNDS = Registry( # noqa
'dogs', parent=HOUNDS, scope='little_hound')
MID_HOUNDS = Registry('dogs', parent=HOUNDS, scope='mid_hound')
SAMOYEDS = Registry('dogs', parent=DOGS, scope='samoyed')
LITTLE_SAMOYEDS = Registry( # noqa
'dogs', parent=SAMOYEDS, scope='little_samoyed')
@DOGS.register_module()
class GoldenRetriever:
pass
# traversing the tree from the root
result = traverse_registry_tree(DOGS)
self.assertEqual(result[0]['num_modules'], 1)
self.assertEqual(len(result), 6)
# traversing the tree from leaf node
result_leaf = traverse_registry_tree(MID_HOUNDS)
# result from any node should be the same
self.assertEqual(result, result_leaf)
    @skipIf(not is_installed('torch'), 'test requires torch')
def test_count_all_registered_modules(self):
temp_dir = TemporaryDirectory()
results = count_registered_modules(temp_dir.name, verbose=True)
self.assertTrue(
osp.exists(
osp.join(temp_dir.name, 'modules_statistic_results.json')))
registries_info = results['registries']
for registry in registries_info:
self.assertTrue(hasattr(root, registry))
self.assertEqual(registries_info[registry][0]['num_modules'],
len(getattr(root, registry).module_dict))
temp_dir.cleanup()
# test not saving results
count_registered_modules(save_path=None, verbose=False)
self.assertFalse(
osp.exists(
osp.join(temp_dir.name, 'modules_statistic_results.json')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from tempfile import TemporaryDirectory
from unittest import TestCase
from mmengine.registry import (Registry, count_registered_modules, root,
traverse_registry_tree)
class TestUtils(TestCase):
def test_traverse_registry_tree(self):
# Hierarchical Registry
# DOGS
# _______|_______
# | |
# HOUNDS (hound) SAMOYEDS (samoyed)
# _______|_______ |
# | | |
# LITTLE_HOUNDS MID_HOUNDS LITTLE_SAMOYEDS
# (little_hound) (mid_hound) (little_samoyed)
DOGS = Registry('dogs')
HOUNDS = Registry('dogs', parent=DOGS, scope='hound')
LITTLE_HOUNDS = Registry( # noqa
'dogs', parent=HOUNDS, scope='little_hound')
MID_HOUNDS = Registry('dogs', parent=HOUNDS, scope='mid_hound')
SAMOYEDS = Registry('dogs', parent=DOGS, scope='samoyed')
LITTLE_SAMOYEDS = Registry( # noqa
'dogs', parent=SAMOYEDS, scope='little_samoyed')
@DOGS.register_module()
class GoldenRetriever:
pass
# traversing the tree from the root
result = traverse_registry_tree(DOGS)
self.assertEqual(result[0]['num_modules'], 1)
self.assertEqual(len(result), 6)
# traversing the tree from leaf node
result_leaf = traverse_registry_tree(MID_HOUNDS)
# result from any node should be the same
self.assertEqual(result, result_leaf)
def test_count_all_registered_modules(self):
temp_dir = TemporaryDirectory()
results = count_registered_modules(temp_dir.name, verbose=True)
self.assertTrue(
osp.exists(
osp.join(temp_dir.name, 'modules_statistic_results.json')))
registries_info = results['registries']
for registry in registries_info:
self.assertTrue(hasattr(root, registry))
self.assertEqual(registries_info[registry][0]['num_modules'],
len(getattr(root, registry).module_dict))
temp_dir.cleanup()
# test not saving results
count_registered_modules(save_path=None, verbose=False)
self.assertFalse(
osp.exists(
osp.join(temp_dir.name, 'modules_statistic_results.json')))
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_CPU_VIDEO_DECODER,
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_gif,
decode_image,
decode_jpeg,
decode_png,
decode_webp,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_CPU_VIDEO_DECODER",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"decode_webp",
"decode_gif",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
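# --- Usage sketch --------------------------------------------------------------
# A short, hedged example of the image I/O entry points re-exported above;
# "input.png" and "output.jpg" are placeholder paths.
if __name__ == "__main__":
    img = read_image("input.png", mode=ImageReadMode.RGB)  # uint8 tensor, (C, H, W)
    write_jpeg(img, "output.jpg", quality=90)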
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_CPU_VIDEO_DECODER,
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_gif,
decode_image,
decode_jpeg,
decode_png,
decode_webp,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_CPU_VIDEO_DECODER",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
import logging
import re
from collections.abc import Sequence
from typing import Optional, Union
from urllib.parse import urljoin, urlparse
logger = logging.getLogger(__name__)
PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#")
SUFFIXES_TO_IGNORE = (
".css",
".js",
".ico",
".png",
".jpg",
".jpeg",
".gif",
".svg",
".csv",
".bz2",
".zip",
".epub",
)
SUFFIXES_TO_IGNORE_REGEX = (
"(?!" + "|".join([re.escape(s) + r"[\#'\"]" for s in SUFFIXES_TO_IGNORE]) + ")"
)
PREFIXES_TO_IGNORE_REGEX = (
"(?!" + "|".join([re.escape(s) for s in PREFIXES_TO_IGNORE]) + ")"
)
DEFAULT_LINK_REGEX = (
rf"href=[\"']{PREFIXES_TO_IGNORE_REGEX}((?:{SUFFIXES_TO_IGNORE_REGEX}.)*?)[\#'\"]"
)
def find_all_links(
raw_html: str, *, pattern: Union[str, re.Pattern, None] = None
) -> list[str]:
"""Extract all links from a raw HTML string.
Args:
raw_html: original HTML.
pattern: Regex to use for extracting links from raw HTML.
Returns:
List[str]: all links
"""
pattern = pattern or DEFAULT_LINK_REGEX
return list(set(re.findall(pattern, raw_html)))
def extract_sub_links(
raw_html: str,
url: str,
*,
base_url: Optional[str] = None,
pattern: Union[str, re.Pattern, None] = None,
prevent_outside: bool = True,
exclude_prefixes: Sequence[str] = (),
continue_on_failure: bool = False,
) -> list[str]:
"""Extract all links from a raw HTML string and convert into absolute paths.
Args:
raw_html: original HTML.
url: the url of the HTML.
base_url: the base URL to check for outside links against.
pattern: Regex to use for extracting links from raw HTML.
prevent_outside: If True, ignore external links which are not children
of the base URL.
exclude_prefixes: Exclude any URLs that start with one of these prefixes.
continue_on_failure: If True, continue if parsing a specific link raises an
exception. Otherwise, raise the exception.
Returns:
List[str]: sub links.
"""
base_url_to_use = base_url if base_url is not None else url
parsed_base_url = urlparse(base_url_to_use)
parsed_url = urlparse(url)
all_links = find_all_links(raw_html, pattern=pattern)
absolute_paths = set()
for link in all_links:
try:
parsed_link = urlparse(link)
# Some may be absolute links like https://to/path
if parsed_link.scheme == "http" or parsed_link.scheme == "https":
absolute_path = link
# Some may have omitted the protocol like //to/path
elif link.startswith("//"):
absolute_path = f"{parsed_url.scheme}:{link}"
else:
absolute_path = urljoin(url, parsed_link.path)
if parsed_link.query:
absolute_path += f"?{parsed_link.query}"
absolute_paths.add(absolute_path)
except Exception as e:
if continue_on_failure:
logger.warning(
"Unable to load link %s. Raised exception:\n\n%s", link, e
)
continue
raise
results = []
for path in absolute_paths:
if any(path.startswith(exclude_prefix) for exclude_prefix in exclude_prefixes):
continue
if prevent_outside:
parsed_path = urlparse(path)
if parsed_base_url.netloc != parsed_path.netloc:
continue
# Will take care of verifying rest of path after netloc
# if it's more specific
if not path.startswith(base_url_to_use):
continue
results.append(path)
return results
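# --- Usage sketch --------------------------------------------------------------
# A small self-contained demonstration of the helpers above on a toy HTML
# snippet; the URLs are placeholders.
if __name__ == "__main__":
    html = (
        '<a href="/docs/intro">intro</a>'
        '<a href="https://example.com/docs/api">api</a>'
        '<a href="mailto:hi@example.com">mail</a>'
    )
    print(find_all_links(html))  # relative and absolute links; mailto: is ignored
    print(extract_sub_links(html, url="https://example.com/docs/"))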
|
import logging
import re
from collections.abc import Sequence
from typing import Optional, Union
from urllib.parse import urljoin, urlparse
logger = logging.getLogger(__name__)
PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#")
SUFFIXES_TO_IGNORE = (
".css",
".js",
".ico",
".png",
".jpg",
".jpeg",
".gif",
".svg",
".csv",
".bz2",
".zip",
".epub",
)
SUFFIXES_TO_IGNORE_REGEX = (
"(?!" + "|".join([re.escape(s) + r"[\#'\"]" for s in SUFFIXES_TO_IGNORE]) + ")"
)
PREFIXES_TO_IGNORE_REGEX = (
"(?!" + "|".join([re.escape(s) for s in PREFIXES_TO_IGNORE]) + ")"
)
DEFAULT_LINK_REGEX = (
rf"href=[\"']{PREFIXES_TO_IGNORE_REGEX}((?:{SUFFIXES_TO_IGNORE_REGEX}.)*?)[\#'\"]"
)
def find_all_links(
raw_html: str, *, pattern: Union[str, re.Pattern, None] = None
) -> list[str]:
"""Extract all links from a raw HTML string.
Args:
raw_html: original HTML.
pattern: Regex to use for extracting links from raw HTML.
Returns:
List[str]: all links
"""
pattern = pattern or DEFAULT_LINK_REGEX
return list(set(re.findall(pattern, raw_html)))
def extract_sub_links(
raw_html: str,
url: str,
*,
base_url: Optional[str] = None,
pattern: Union[str, re.Pattern, None] = None,
prevent_outside: bool = True,
exclude_prefixes: Sequence[str] = (),
continue_on_failure: bool = False,
) -> list[str]:
"""Extract all links from a raw HTML string and convert into absolute paths.
Args:
raw_html: original HTML.
url: the url of the HTML.
base_url: the base URL to check for outside links against.
pattern: Regex to use for extracting links from raw HTML.
prevent_outside: If True, ignore external links which are not children
of the base URL.
exclude_prefixes: Exclude any URLs that start with one of these prefixes.
continue_on_failure: If True, continue if parsing a specific link raises an
exception. Otherwise, raise the exception.
Returns:
List[str]: sub links.
"""
base_url_to_use = base_url if base_url is not None else url
parsed_base_url = urlparse(base_url_to_use)
parsed_url = urlparse(url)
all_links = find_all_links(raw_html, pattern=pattern)
absolute_paths = set()
for link in all_links:
try:
parsed_link = urlparse(link)
# Some may be absolute links like https://to/path
if parsed_link.scheme == "http" or parsed_link.scheme == "https":
absolute_path = link
# Some may have omitted the protocol like //to/path
elif link.startswith("//"):
absolute_path = f"{parsed_url.scheme}:{link}"
else:
absolute_path = urljoin(url, parsed_link.path)
if parsed_link.query:
absolute_path += f"?{parsed_link.query}"
absolute_paths.add(absolute_path)
except Exception as e:
if continue_on_failure:
logger.warning(f"Unable to load link {link}. Raised exception:\n\n{e}")
continue
raise
results = []
for path in absolute_paths:
if any(path.startswith(exclude_prefix) for exclude_prefix in exclude_prefixes):
continue
if prevent_outside:
parsed_path = urlparse(path)
if parsed_base_url.netloc != parsed_path.netloc:
continue
# Will take care of verifying rest of path after netloc
# if it's more specific
if not path.startswith(base_url_to_use):
continue
results.append(path)
return results
|
import numpy as np
from docarray import DocumentArray, Document
def random_docs(
num_docs,
chunks_per_doc=5,
embed_dim=10,
jitter=1,
start_id=0,
embedding=True,
sparse_embedding=False,
text='hello world',
) -> DocumentArray:
da = DocumentArray()
next_chunk_doc_id = start_id + num_docs
for j in range(num_docs):
doc_id = str(start_id + j)
d = Document(id=doc_id)
d.text = text
d.tags['id'] = f'myself id is: {doc_id}'
if embedding:
if sparse_embedding:
from scipy.sparse import coo_matrix
d.embedding = coo_matrix(
(np.array([1, 1, 1]), (np.array([0, 1, 2]), np.array([1, 2, 1])))
)
else:
d.embedding = np.random.random(
[embed_dim + np.random.randint(0, jitter)]
)
for _ in range(chunks_per_doc):
chunk_doc_id = str(next_chunk_doc_id)
c = Document(id=chunk_doc_id)
c.text = 'i\'m chunk %s from doc %s' % (chunk_doc_id, doc_id)
if embedding:
c.embedding = np.random.random(
[embed_dim + np.random.randint(0, jitter)]
)
            c.tags['parent_id'] = f'my parent is: {doc_id}'
c.tags['id'] = f'myself id is: {doc_id}'
d.chunks.append(c)
next_chunk_doc_id += 1
da.append(d)
return da
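# --- Usage sketch --------------------------------------------------------------
# Quick demonstration of the helper above, using the defaults.
if __name__ == '__main__':
    docs = random_docs(num_docs=2, chunks_per_doc=3)
    assert len(docs) == 2
    assert len(docs[0].chunks) == 3
    print(docs[0].embedding.shape)  # (10,) with the default embed_dim and jitter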
|
import numpy as np
from docarray import DocumentArray, Document
def random_docs(
num_docs,
chunks_per_doc=5,
embed_dim=10,
jitter=1,
start_id=0,
embedding=True,
sparse_embedding=False,
text='hello world',
) -> DocumentArray:
da = DocumentArray()
next_chunk_doc_id = start_id + num_docs
for j in range(num_docs):
doc_id = str(start_id + j)
d = Document(id=doc_id)
d.text = text
d.tags['id'] = f'myself id is: {doc_id}'
if embedding:
if sparse_embedding:
from scipy.sparse import coo_matrix
d.embedding = coo_matrix(
(np.array([1, 1, 1]), (np.array([0, 1, 2]), np.array([1, 2, 1])))
)
else:
d.embedding = np.random.random(
[embed_dim + np.random.randint(0, jitter)]
)
for _ in range(chunks_per_doc):
chunk_doc_id = str(next_chunk_doc_id)
c = Document(id=chunk_doc_id)
c.text = 'i\'m chunk %s from doc %s' % (chunk_doc_id, doc_id)
if embedding:
c.embedding = np.random.random(
[embed_dim + np.random.randint(0, jitter)]
)
            c.tags['parent_id'] = f'my parent is: {doc_id}'
c.tags['id'] = f'myself id is: {doc_id}'
d.chunks.append(c)
next_chunk_doc_id += 1
da.append(d)
return da
|
"""
This tool allows agents to interact with the NASA API, specifically
the NASA Image & Video Library and Exoplanet
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.nasa import NasaAPIWrapper
class NasaAction(BaseTool):
"""Tool that queries the Atlassian Jira API."""
api_wrapper: NasaAPIWrapper = Field(default_factory=NasaAPIWrapper)
mode: str
name: str = ""
description: str = ""
def _run(
self,
instructions: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the NASA API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
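# --- Usage sketch --------------------------------------------------------------
# Hedged example of wiring the tool; the mode string "search_media" and the
# query payload are assumptions and must match what NasaAPIWrapper supports.
if __name__ == "__main__":
    tool = NasaAction(
        mode="search_media",
        name="nasa_media_search",
        description="Search the NASA Image & Video Library",
    )
    print(tool.run('{"q": "apollo 11", "media_type": "image"}'))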
|
"""
This tool allows agents to interact with the NASA API, specifically
the NASA Image & Video Library and Exoplanet
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.nasa import NasaAPIWrapper
class NasaAction(BaseTool): # type: ignore[override]
"""Tool that queries the Atlassian Jira API."""
api_wrapper: NasaAPIWrapper = Field(default_factory=NasaAPIWrapper)
mode: str
name: str = ""
description: str = ""
def _run(
self,
instructions: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the NASA API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
|
import os
from typing import Type
from pydantic import BaseModel, Field
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.typing import ID
from .mixins import ProtoMixin
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: ID.validate(os.urandom(16).hex()))
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested Python class defined in the schema. Useful for
        reconstructing a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
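# --- Usage sketch --------------------------------------------------------------
# Hedged illustration of the schema introspection helper above; `Inner` and
# `Outer` are throwaway example classes, not part of docarray.
if __name__ == '__main__':

    class Inner(BaseDocument):
        text: str = ''

    class Outer(BaseDocument):
        inner: Inner

    assert Outer._get_nested_document_class('inner') is Inner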
|
import os
from typing import Union
from uuid import UUID
from pydantic import BaseModel, Field
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from .mixins import ProtoMixin
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: Union[int, str, UUID] = Field(default_factory=lambda: os.urandom(16).hex())
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.prototype.transforms import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[bool] = None,
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: Optional[bool] = None,
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
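# --- Usage sketch --------------------------------------------------------------
# Hedged example of constructing and transforming a Mask; the prototype API may
# change between releases.
if __name__ == "__main__":
    mask = Mask(torch.zeros(1, 32, 32, dtype=torch.uint8))
    print(mask.spatial_size)       # (32, 32)
    flipped = mask.horizontal_flip()
    print(type(flipped).__name__)  # Mask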
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[bool] = None,
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: Optional[bool] = None,
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
|
from typing import Union
import numpy as np
import pytest
import torch
from docarray import BaseDocument, DocumentArray
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
return batch.stack()
def test_len(batch):
assert len(batch) == 10
def test_getitem(batch):
for i in range(len(batch)):
print(i)
assert (batch[i].tensor == torch.zeros(3, 224, 224)).all()
def test_iterator(batch):
for doc in batch:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_stack_setter(batch):
batch.tensor = torch.ones(10, 3, 224, 224)
assert (batch.tensor == torch.ones(10, 3, 224, 224)).all()
def test_stack_optional(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
def test_stack_numpy():
class Image(BaseDocument):
tensor: NdArray[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (batch._columns['tensor'] == np.zeros((10, 3, 224, 224))).all()
assert (batch.tensor == np.zeros((10, 3, 224, 224))).all()
assert batch.tensor.ctypes.data == batch._columns['tensor'].ctypes.data
batch.unstack()
def test_stack(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
assert batch._columns['tensor'].data_ptr() == batch.tensor.data_ptr()
for doc, tensor in zip(batch, batch.tensor):
assert doc.tensor.data_ptr() == tensor.data_ptr()
for i in range(len(batch)):
assert batch[i].tensor.data_ptr() == batch.tensor[i].data_ptr()
def test_stack_mod_nested_document():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
class MMdoc(BaseDocument):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (
batch._columns['img']._columns['tensor'] == torch.zeros(10, 3, 224, 224)
).all()
assert (batch.img.tensor == torch.zeros(10, 3, 224, 224)).all()
assert (
batch._columns['img']._columns['tensor'].data_ptr()
== batch.img.tensor.data_ptr()
)
def test_convert_to_da(batch):
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_unstack_nested_document():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
class MMdoc(BaseDocument):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.img.tensor == torch.zeros(3, 224, 224)).all()
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
def test_proto_stacked_mode_numpy():
class MyDoc(BaseDocument):
tensor: NdArray[3, 224, 224]
da = DocumentArray[MyDoc](
[MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
da = da.stack()
da.from_protobuf(da.to_protobuf())
def test_stack_call():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
da = da.stack()
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
def test_context_manager():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
with da.stacked_mode() as da:
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
da.tensor = torch.ones(10, 3, 224, 224)
tensor = da.tensor
assert isinstance(tensor, list)
for doc in da:
assert (doc.tensor == torch.ones(3, 224, 224)).all()
def test_stack_union():
class Image(BaseDocument):
tensor: Union[TorchTensor[3, 224, 224], NdArray[3, 224, 224]]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch[3].tensor = np.zeros((3, 224, 224))
# union fields aren't actually stacked
# just checking that there is no error
batch.stack()
|
from typing import Union
import numpy as np
import pytest
import torch
from docarray import Document, DocumentArray
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
return batch.stack()
def test_len(batch):
assert len(batch) == 10
def test_getitem(batch):
for i in range(len(batch)):
print(i)
assert (batch[i].tensor == torch.zeros(3, 224, 224)).all()
def test_iterator(batch):
for doc in batch:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_stack_setter(batch):
batch.tensor = torch.ones(10, 3, 224, 224)
assert (batch.tensor == torch.ones(10, 3, 224, 224)).all()
def test_stack_optional(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
def test_stack_numpy():
class Image(Document):
tensor: NdArray[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (batch._columns['tensor'] == np.zeros((10, 3, 224, 224))).all()
assert (batch.tensor == np.zeros((10, 3, 224, 224))).all()
assert batch.tensor.ctypes.data == batch._columns['tensor'].ctypes.data
batch.unstack()
def test_stack(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
assert batch._columns['tensor'].data_ptr() == batch.tensor.data_ptr()
for doc, tensor in zip(batch, batch.tensor):
assert doc.tensor.data_ptr() == tensor.data_ptr()
for i in range(len(batch)):
assert batch[i].tensor.data_ptr() == batch.tensor[i].data_ptr()
def test_stack_mod_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (
batch._columns['img']._columns['tensor'] == torch.zeros(10, 3, 224, 224)
).all()
assert (batch.img.tensor == torch.zeros(10, 3, 224, 224)).all()
assert (
batch._columns['img']._columns['tensor'].data_ptr()
== batch.img.tensor.data_ptr()
)
def test_convert_to_da(batch):
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_unstack_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.img.tensor == torch.zeros(3, 224, 224)).all()
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
def test_proto_stacked_mode_numpy():
class MyDoc(Document):
tensor: NdArray[3, 224, 224]
da = DocumentArray[MyDoc](
[MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
da = da.stack()
da.from_protobuf(da.to_protobuf())
def test_stack_call():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
da = da.stack()
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
def test_context_manager():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
with da.stacked_mode() as da:
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
da.tensor = torch.ones(10, 3, 224, 224)
tensor = da.tensor
assert isinstance(tensor, list)
for doc in da:
assert (doc.tensor == torch.ones(3, 224, 224)).all()
def test_stack_union():
class Image(Document):
tensor: Union[TorchTensor[3, 224, 224], NdArray[3, 224, 224]]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch[3].tensor = np.zeros((3, 224, 224))
# union fields aren't actually stacked
# just checking that there is no error
batch.stack()
|
import sys
import pytest
from hypothesis import given, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.integers(2, 16),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
@pytest.mark.filterwarnings("ignore")
def test_extmem_qdm(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
n_bins: int,
on_host: bool,
) -> None:
check_extmem_qdm(
n_samples_per_batch,
n_features,
n_batches=n_batches,
n_bins=n_bins,
device="cuda",
on_host=on_host,
)
@pytest.mark.filterwarnings("ignore")
def test_invalid_device_extmem_qdm() -> None:
it = tm.IteratorForTest(
*tm.make_batches(16, 4, 2, use_cupy=False), cache="cache", on_host=True
)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="cannot be used for GPU"):
xgb.train({"device": "cuda"}, Xy)
it = tm.IteratorForTest(
*tm.make_batches(16, 4, 2, use_cupy=True), cache="cache", on_host=True
)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="cannot be used for CPU"):
xgb.train({"device": "cpu"}, Xy)
def test_concat_pages() -> None:
it = tm.IteratorForTest(*tm.make_batches(64, 16, 4, use_cupy=True), cache=None)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="can not be used with concatenated pages"):
xgb.train(
{
"device": "cuda",
"subsample": 0.5,
"sampling_method": "gradient_based",
"extmem_concat_pages": True,
"objective": "reg:absoluteerror",
},
Xy,
)
@given(
strategies.integers(1, 64),
strategies.integers(1, 8),
strategies.integers(1, 4),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_quantile_objective(
n_samples_per_batch: int, n_features: int, n_batches: int
) -> None:
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"hist",
"cuda",
)
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"approx",
"cuda",
)
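# Sketch of the iterator protocol that tm.IteratorForTest wraps above
# (illustrative, not used by these tests): an external-memory data source only
# has to implement ``next`` and ``reset`` on ``xgboost.DataIter``.
#
#     class SketchIter(xgb.DataIter):
#         def __init__(self, batches):  # batches: list of (X, y) array pairs
#             self._batches, self._it = batches, 0
#             super().__init__(cache_prefix="cache")
#
#         def next(self, input_data) -> bool:
#             if self._it == len(self._batches):
#                 return False  # iteration finished
#             X, y = self._batches[self._it]
#             input_data(data=X, label=y)
#             self._it += 1
#             return True
#
#         def reset(self) -> None:
#             self._it = 0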
|
import sys
import pytest
from hypothesis import given, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
@pytest.mark.filterwarnings("ignore")
def test_extmem_qdm(
n_samples_per_batch: int, n_features: int, n_batches: int, on_host: bool
) -> None:
check_extmem_qdm(n_samples_per_batch, n_features, n_batches, "cuda", on_host)
@pytest.mark.filterwarnings("ignore")
def test_invalid_device_extmem_qdm() -> None:
it = tm.IteratorForTest(
*tm.make_batches(16, 4, 2, use_cupy=False), cache="cache", on_host=True
)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="cannot be used for GPU"):
xgb.train({"device": "cuda"}, Xy)
it = tm.IteratorForTest(
*tm.make_batches(16, 4, 2, use_cupy=True), cache="cache", on_host=True
)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="cannot be used for CPU"):
xgb.train({"device": "cpu"}, Xy)
def test_concat_pages() -> None:
it = tm.IteratorForTest(*tm.make_batches(64, 16, 4, use_cupy=True), cache=None)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="can not be used with concatenated pages"):
xgb.train(
{
"device": "cuda",
"subsample": 0.5,
"sampling_method": "gradient_based",
"extmem_concat_pages": True,
"objective": "reg:absoluteerror",
},
Xy,
)
@given(
strategies.integers(1, 64),
strategies.integers(1, 8),
strategies.integers(1, 4),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_quantile_objective(
n_samples_per_batch: int, n_features: int, n_batches: int
) -> None:
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"hist",
"cuda",
)
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"approx",
"cuda",
)
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ..exceptions import DataConversionWarning
from . import metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
__all__ = [
"Bunch",
"ClassifierTags",
"DataConversionWarning",
"InputTags",
"RegressorTags",
"Tags",
"TargetTags",
"TransformerTags",
"all_estimators",
"as_float_array",
"assert_all_finite",
"check_X_y",
"check_array",
"check_consistent_length",
"check_random_state",
"check_scalar",
"check_symmetric",
"column_or_1d",
"compute_class_weight",
"compute_sample_weight",
"deprecated",
"estimator_html_repr",
"gen_batches",
"gen_even_slices",
"get_tags",
"indexable",
"metadata_routing",
"murmurhash3_32",
"resample",
"safe_mask",
"safe_sqr",
"shuffle",
]
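# Usage sketch of the helpers re-exported above (illustrative, not executed on
# import): batching with `gen_batches` plus container-agnostic `_safe_indexing`.
#
#     import numpy as np
#     from sklearn.utils import _safe_indexing, check_array, gen_batches
#
#     X = check_array(np.arange(20).reshape(10, 2))
#     for batch_slice in gen_batches(X.shape[0], batch_size=4):
#         X_batch = _safe_indexing(X, batch_slice)  # arrays, sparse, DataFrames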
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
# TODO(1.7): remove parallel_backend and register_parallel_backend
msg = "deprecated in 1.5 to be removed in 1.7. Use joblib.{} instead."
register_parallel_backend = deprecated(msg)(_joblib.register_parallel_backend)
# When applied to a class, `deprecated` modifies the object stored in the _joblib module in place, so we subclass instead.
@deprecated(msg)
class parallel_backend(_joblib.parallel_backend):
pass
__all__ = [
"Bunch",
"ClassifierTags",
"DataConversionWarning",
"InputTags",
"RegressorTags",
"Tags",
"TargetTags",
"TransformerTags",
"all_estimators",
"as_float_array",
"assert_all_finite",
"check_X_y",
"check_array",
"check_consistent_length",
"check_random_state",
"check_scalar",
"check_symmetric",
"column_or_1d",
"compute_class_weight",
"compute_sample_weight",
"deprecated",
"estimator_html_repr",
"gen_batches",
"gen_even_slices",
"get_tags",
"indexable",
"metadata_routing",
"murmurhash3_32",
"parallel_backend",
"register_parallel_backend",
"resample",
"safe_mask",
"safe_sqr",
"shuffle",
]
# TODO(1.7): remove
def __getattr__(name):
if name == "IS_PYPY":
warnings.warn(
"IS_PYPY is deprecated and will be removed in 1.7.",
FutureWarning,
)
return platform.python_implementation() == "PyPy"
raise AttributeError(f"module {__name__} has no attribute {name}")
# TODO(1.7): remove tosequence
@deprecated("tosequence was deprecated in 1.5 and will be removed in 1.7")
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
The iterable to be converted.
Returns
-------
x : Sequence
If `x` is a NumPy array, it returns it as a `ndarray`. If `x`
is a `Sequence`, `x` is returned as-is. If `x` is from any other
type, `x` is returned casted as a list.
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.116.1"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
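# Minimal usage sketch of the re-exported names (illustrative):
#
#     from fastapi import FastAPI
#
#     app = FastAPI()
#
#     @app.get("/")
#     def read_root() -> dict:
#         return {"message": "Hello World"}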
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.116.0"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
import logging
import os
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def get_trainer(args):
seed_everything(1)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(args.exp_dir, args.exp_name) if args.exp_dir else None,
monitor="monitoring_step",
mode="max",
save_last=True,
filename="{epoch}",
save_top_k=10,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
lr_monitor,
]
return Trainer(
sync_batchnorm=True,
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
gradient_clip_val=10.0,
)
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.modality == "audiovisual":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
return model
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--modality",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--root-dir",
type=str,
help="Root directory to LRS3 audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument(
"--exp-dir",
default="./exp",
type=str,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--exp-name",
type=str,
help="Experiment name",
)
parser.add_argument(
"--num-nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=55,
type=int,
help="Number of epochs to train for. (Default: 55)",
)
parser.add_argument(
"--resume-from-checkpoint",
default=None,
type=str,
help="Path to the checkpoint to resume from",
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to use debug level for logging",
)
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
trainer = get_trainer(args)
trainer.fit(model, data_module)
ensemble(args)
if __name__ == "__main__":
cli_main()
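# Example invocation (assuming this file is saved as train.py; all paths are
# placeholders):
#
#     python train.py --modality audiovisual --mode offline \
#         --root-dir /path/to/lrs3 --sp-model-path /path/to/spm.model \
#         --num-nodes 4 --gpus 8 --epochs 55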
|
import logging
import os
from argparse import ArgumentParser
import sentencepiece as spm
from average_checkpoints import ensemble
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def get_trainer(args):
seed_everything(1)
checkpoint = ModelCheckpoint(
dirpath=os.path.join(args.exp_dir, args.exp_name) if args.exp_dir else None,
monitor="monitoring_step",
mode="max",
save_last=True,
filename="{epoch}",
save_top_k=10,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
lr_monitor,
]
return Trainer(
sync_batchnorm=True,
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.num_nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
)
def get_lightning_module(args):
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
if args.modality == "audiovisual":
from lightning_av import AVConformerRNNTModule
model = AVConformerRNNTModule(args, sp_model)
else:
from lightning import ConformerRNNTModule
model = ConformerRNNTModule(args, sp_model)
return model
def parse_args():
parser = ArgumentParser()
parser.add_argument(
"--modality",
type=str,
help="Modality",
required=True,
)
parser.add_argument(
"--mode",
type=str,
help="Perform online or offline recognition.",
required=True,
)
parser.add_argument(
"--root-dir",
type=str,
help="Root directory to LRS3 audio-visual datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=str,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--pretrained-model-path",
type=str,
help="Path to Pretraned model.",
)
parser.add_argument(
"--exp-dir",
default="./exp",
type=str,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--exp-name",
type=str,
help="Experiment name",
)
parser.add_argument(
"--num-nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=55,
type=int,
help="Number of epochs to train for. (Default: 55)",
)
parser.add_argument(
"--resume-from-checkpoint",
default=None,
type=str,
help="Path to the checkpoint to resume from",
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to use debug level for logging",
)
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
model = get_lightning_module(args)
data_module = get_data_module(args, str(args.sp_model_path))
trainer = get_trainer(args)
trainer.fit(model, data_module)
ensemble(args)
if __name__ == "__main__":
cli_main()
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import _get_librispeech_metadata
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
_SUBSET_MAP = {"10min": ["1h/0"], "1h": ["1h/*"], "10h": ["1h/*", "9h"]}
def _get_fileids_paths(path: Path, folders: List[str], _ext_audio: str) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
The format of path is like:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
Args:
path (Path): Root path to the dataset.
folders (List[str]): Folders that contain the desired audio files.
_ext_audio (str): Extension of audio files.
Returns:
List[Tuple[str, str]]:
List of tuples where the first element is the relative path to the audio file.
The format of relative path is like:
1h/[0-5]/[clean, other] or 9h/[clean, other]
The second element is the file name without audio extension.
"""
path = Path(path)
files_paths = []
for folder in folders:
paths = [p.relative_to(path) for p in path.glob(f"{folder}/*/*/*/*{_ext_audio}")]
files_paths += [(str(p.parent.parent.parent), str(p.stem)) for p in paths] # get subset folder and file name
files_paths.sort(key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Subset of Libri-light :cite:`librilight` dataset,
which was used in HuBERT :cite:`hsu2021hubert` for supervised fine-tuning.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``"10min"``, ``"1h"``, ``"10h"``]
(Default: ``"10min"``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in _SUBSET_MAP:
raise ValueError(f"`subset` must be one of {_SUBSET_MAP.keys()}. Found: {subset}")
folders = _SUBSET_MAP[subset]
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, folders, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcript
int:
Speaker ID
int:
Chapter ID
int:
Utterance ID
"""
file_path, fileid = self._fileids_paths[n]
metadata = _get_librispeech_metadata(fileid, self._path, file_path, self._ext_audio, self._ext_txt)
waveform, _ = torchaudio.load(os.path.join(self._path, metadata[0]))
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self._fileids_paths)
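# Usage sketch (the root directory is a placeholder):
#
#     dataset = LibriLightLimited("data", subset="10min", download=True)
#     waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id = dataset[0]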
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import _get_librispeech_metadata
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
_SUBSET_MAP = {"10min": ["1h/0"], "1h": ["1h/*"], "10h": ["1h/*", "9h"]}
def _get_fileids_paths(path, folders, _ext_audio) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
The format of path is like:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
"""
path = Path(path)
files_paths = []
for folder in folders:
paths = [p.relative_to(path) for p in path.glob(f"{folder}/*/*/*/*{_ext_audio}")]
files_paths += [(str(p.parent.parent.parent), str(p.stem)) for p in paths] # get subset folder and file name
files_paths.sort(key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Subset of Libri-light :cite:`librilight` dataset,
which was used in HuBERT :cite:`hsu2021hubert` for supervised fine-tuning.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``"10min"``, ``"1h"``, ``"10h"``]
(Default: ``"10min"``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in _SUBSET_MAP:
raise ValueError(f"`subset` must be one of {_SUBSET_MAP.keys()}. Found: {subset}")
folders = _SUBSET_MAP[subset]
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, folders, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcript
int:
Speaker ID
int:
Chapter ID
int:
Utterance ID
"""
file_path, fileid = self._fileids_paths[n]
metadata = _get_librispeech_metadata(fileid, self._path, file_path, self._ext_audio, self._ext_txt)
waveform, _ = torchaudio.load(os.path.join(self._path, metadata[0]))
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self._fileids_paths)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase):
model_class = Lumina2Transformer2DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
alternate_keys_ckpt_paths = [
"https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
]
repo_id = "Alpha-VLLM/Lumina-Image-2.0"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
torch.cuda.empty_cache()
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
torch.cuda.empty_cache()
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase):
model_class = Lumina2Transformer2DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
alternate_keys_ckpt_paths = [
"https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors"
]
repo_id = "Alpha-VLLM/Lumina-Image-2.0"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
torch.cuda.empty_cache()
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
torch.cuda.empty_cache()
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import Gemma2Model, GemmaTokenizer
from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel
from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
class SanaLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = SanaPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler(shift=7.0)
scheduler_kwargs = {}
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
transformer_kwargs = {
"patch_size": 1,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_cross_attention_heads": 2,
"cross_attention_head_dim": 4,
"cross_attention_dim": 8,
"caption_channels": 8,
"sample_size": 32,
}
transformer_cls = SanaTransformer2DModel
vae_kwargs = {
"in_channels": 3,
"latent_channels": 4,
"attention_head_dim": 2,
"encoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"decoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"encoder_block_out_channels": (8, 8),
"decoder_block_out_channels": (8, 8),
"encoder_qkv_multiscales": ((), (5,)),
"decoder_qkv_multiscales": ((), (5,)),
"encoder_layers_per_block": (1, 1),
"decoder_layers_per_block": [1, 1],
"downsample_block_type": "conv",
"upsample_block_type": "interpolate",
"decoder_norm_types": "rms_norm",
"decoder_act_fns": "silu",
"scaling_factor": 0.41407,
}
vae_cls = AutoencoderDC
tokenizer_cls, tokenizer_id = GemmaTokenizer, "hf-internal-testing/dummy-gemma"
text_encoder_cls, text_encoder_id = Gemma2Model, "hf-internal-testing/dummy-gemma-for-diffusers"
@property
def output_shape(self):
return (1, 32, 32, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
sizes = (32, 32)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "",
"negative_prompt": "",
"num_inference_steps": 4,
"guidance_scale": 4.5,
"height": 32,
"width": 32,
"max_sequence_length": sequence_length,
"output_type": "np",
"complex_human_instruction": None,
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
@unittest.skip("Not supported in SANA.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Not supported in SANA.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in SANA.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import Gemma2Model, GemmaTokenizer
from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel
from diffusers.utils.testing_utils import floats_tensor, require_peft_backend
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
class SanaLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = SanaPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler(shift=7.0)
scheduler_kwargs = {}
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
transformer_kwargs = {
"patch_size": 1,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_cross_attention_heads": 2,
"cross_attention_head_dim": 4,
"cross_attention_dim": 8,
"caption_channels": 8,
"sample_size": 32,
}
transformer_cls = SanaTransformer2DModel
vae_kwargs = {
"in_channels": 3,
"latent_channels": 4,
"attention_head_dim": 2,
"encoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"decoder_block_types": (
"ResBlock",
"EfficientViTBlock",
),
"encoder_block_out_channels": (8, 8),
"decoder_block_out_channels": (8, 8),
"encoder_qkv_multiscales": ((), (5,)),
"decoder_qkv_multiscales": ((), (5,)),
"encoder_layers_per_block": (1, 1),
"decoder_layers_per_block": [1, 1],
"downsample_block_type": "conv",
"upsample_block_type": "interpolate",
"decoder_norm_types": "rms_norm",
"decoder_act_fns": "silu",
"scaling_factor": 0.41407,
}
vae_cls = AutoencoderDC
tokenizer_cls, tokenizer_id = GemmaTokenizer, "hf-internal-testing/dummy-gemma"
text_encoder_cls, text_encoder_id = Gemma2Model, "hf-internal-testing/dummy-gemma-for-diffusers"
@property
def output_shape(self):
return (1, 32, 32, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
sizes = (32, 32)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "",
"negative_prompt": "",
"num_inference_steps": 4,
"guidance_scale": 4.5,
"height": 32,
"width": 32,
"max_sequence_length": sequence_length,
"output_type": "np",
"complex_human_instruction": None,
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
@unittest.skip("Not supported in SANA.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Not supported in SANA.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in SANA.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in SANA.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.core.utils.typing import ConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
Defaults to 56.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
finest_scale: int = 56,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
roi_layer=roi_layer,
out_channels=out_channels,
featmap_strides=featmap_strides,
init_cfg=init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor:
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
                roi_scale_factor: Optional[float] = None) -> Tensor:
        """Extract RoI features.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
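# Worked example of map_roi_levels with the default finest_scale=56
# (illustrative): an RoI in (batch_idx, x1, y1, x2, y2) format with a
# 224 x 224 box has scale sqrt(224 * 224) = 224, and
# floor(log2(224 / 56)) = 2, so its features are pooled from level 2.
#
#     rois = torch.tensor([[0., 0., 0., 224., 224.]])
#     # -> target level 2 (clamped to [0, num_levels - 1])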
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56,
init_cfg=None):
super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
featmap_strides, init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
if torch.onnx.is_in_onnx_export():
# Work around to export mask-rcnn to onnx
roi_feats = rois[:, :1].clone().detach()
roi_feats = roi_feats.expand(*expand_dims)
roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
roi_feats = roi_feats * 0
else:
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
if torch.onnx.is_in_onnx_export():
# To keep all roi_align nodes exported to onnx
# and skip nonzero op
mask = mask.float().unsqueeze(-1)
# select target level rois and reset the rest rois to zero.
rois_i = rois.clone().detach()
rois_i *= mask
mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)
roi_feats_t = self.roi_layers[i](feats[i], rois_i)
roi_feats_t *= mask_exp
roi_feats += roi_feats_t
continue
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
|
import pytest
from langchain._api import suppress_langchain_deprecation_warning as sup2
from langchain_core._api import suppress_langchain_deprecation_warning as sup1
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
@pytest.mark.xfail(reason="Unknown reason")
def test_create_json_agent_migration() -> None:
"""Test the migration of create_json_agent from langchain to langchain_community."""
with sup1(), sup2():
raw_migrations = generate_simplified_migrations(
from_package="langchain", to_package="langchain_community"
)
json_agent_migrations = [
migration
for migration in raw_migrations
if "create_json_agent" in migration[0]
]
assert json_agent_migrations == [
(
"langchain.agents.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
(
"langchain.agents.agent_toolkits.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
(
"langchain.agents.agent_toolkits.json.base.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
]
@pytest.mark.xfail(reason="Unknown reason")
def test_create_single_store_retriever_db() -> None:
"""Test migration from langchain to langchain_core."""
with sup1(), sup2():
raw_migrations = generate_simplified_migrations(
from_package="langchain", to_package="langchain_core"
)
# SingleStore was an old name for VectorStoreRetriever
single_store_migration = [
migration for migration in raw_migrations if "SingleStore" in migration[0]
]
assert single_store_migration == [
(
"langchain.vectorstores.singlestoredb.SingleStoreDBRetriever",
"langchain_core.vectorstores.VectorStoreRetriever",
),
]
|
import pytest
from langchain._api import suppress_langchain_deprecation_warning as sup2
from langchain_core._api import suppress_langchain_deprecation_warning as sup1
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
@pytest.mark.xfail(reason="Unknown reason")
def test_create_json_agent_migration() -> None:
"""Test the migration of create_json_agent from langchain to langchain_community."""
with sup1():
with sup2():
raw_migrations = generate_simplified_migrations(
from_package="langchain", to_package="langchain_community"
)
json_agent_migrations = [
migration
for migration in raw_migrations
if "create_json_agent" in migration[0]
]
assert json_agent_migrations == [
(
"langchain.agents.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
(
"langchain.agents.agent_toolkits.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
(
"langchain.agents.agent_toolkits.json.base.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
]
@pytest.mark.xfail(reason="Unknown reason")
def test_create_single_store_retriever_db() -> None:
"""Test migration from langchain to langchain_core"""
with sup1():
with sup2():
raw_migrations = generate_simplified_migrations(
from_package="langchain", to_package="langchain_core"
)
# SingleStore was an old name for VectorStoreRetriever
single_store_migration = [
migration
for migration in raw_migrations
if "SingleStore" in migration[0]
]
assert single_store_migration == [
(
"langchain.vectorstores.singlestoredb.SingleStoreDBRetriever",
"langchain_core.vectorstores.VectorStoreRetriever",
),
]
|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPLATE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(y1, 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPLATE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPLATE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(y1, 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPLATE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc, DocList
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.typing import NdArray
class SchemaDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10]
@pytest.fixture
def docs():
docs = DocList[SchemaDoc](
[
SchemaDoc(text=f'hello {i}', price=i, tensor=np.array([i] * 10))
for i in range(9)
]
)
docs.append(SchemaDoc(text='good bye', price=100, tensor=np.array([100.0] * 10)))
return docs
def test_indexing(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
assert doc_index.num_docs() == 0
doc_index.index(docs)
assert doc_index.num_docs() == 10
@pytest.fixture
def doc_index(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
doc_index.index(docs)
return doc_index
def test_del_item(docs, doc_index):
to_remove = [docs[0].id, docs[1].id]
doc_index._del_items(to_remove)
assert doc_index.num_docs() == 8
def test_del(docs, doc_index):
del doc_index[docs[0].id]
assert doc_index.num_docs() == 9
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = MyDoc(text='query', price=0, tensor=np.ones(10))
else:
query = np.ones(10)
docs, scores = doc_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find_batched(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = DocList[MyDoc](
[
MyDoc(text='query 0', price=0, tensor=np.zeros(10)),
MyDoc(text='query 1', price=1, tensor=np.ones(10)),
]
)
else:
query = np.ones((2, 10))
docs, scores = doc_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 2
for result in docs:
assert len(result) == 5
assert doc_index.num_docs() == 10
empty_index = InMemoryExactNNIndex[MyDoc]()
docs, scores = empty_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 0
assert len(scores) == 0
def test_concatenated_queries(doc_index):
query = SchemaDoc(text='query', price=0, tensor=np.ones(10))
q = (
doc_index.build_query()
.find(query=query, search_field='tensor', limit=5)
.filter(filter_query={'price': {'$neq': 5}})
.build()
)
docs, scores = doc_index.execute_query(q)
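# The query first retrieves the 5 nearest neighbors, then the price != 5
# filter drops the one retrieved doc with price == 5, leaving 4 results.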
assert len(docs) == 4
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc, DocList
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.typing import NdArray
class SchemaDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10]
@pytest.fixture
def docs():
docs = DocList[SchemaDoc](
[
SchemaDoc(text=f'hello {i}', price=i, tensor=np.array([i] * 10))
for i in range(9)
]
)
docs.append(SchemaDoc(text='good bye', price=100, tensor=np.array([100.0] * 10)))
return docs
def test_indexing(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
assert doc_index.num_docs() == 0
doc_index.index(docs)
assert doc_index.num_docs() == 10
@pytest.fixture
def doc_index(docs):
doc_index = InMemoryExactNNIndex[SchemaDoc]()
doc_index.index(docs)
return doc_index
def test_del_item(docs, doc_index):
to_remove = [docs[0].id, docs[1].id]
doc_index._del_items(to_remove)
assert doc_index.num_docs() == 8
def test_del(docs, doc_index):
del doc_index[docs[0].id]
assert doc_index.num_docs() == 9
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = MyDoc(text='query', price=0, tensor=np.ones(10))
else:
query = np.ones(10)
docs, scores = doc_index.find(query, search_field='tensor', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert doc_index.num_docs() == 10
@pytest.mark.parametrize('space', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist'])
@pytest.mark.parametrize('is_query_doc', [True, False])
def test_find_batched(doc_index, space, is_query_doc):
class MyDoc(BaseDoc):
text: str
price: int
tensor: NdArray[10] = Field(space=space)
if is_query_doc:
query = DocList[MyDoc](
[
MyDoc(text='query 0', price=0, tensor=np.zeros(10)),
MyDoc(text='query 1', price=1, tensor=np.ones(10)),
]
)
else:
query = np.ones((2, 10))
docs, scores = doc_index.find_batched(query, search_field='tensor', limit=5)
assert len(docs) == 2
for result in docs:
assert len(result) == 5
assert doc_index.num_docs() == 10
def test_concatenated_queries(doc_index):
query = SchemaDoc(text='query', price=0, tensor=np.ones(10))
q = (
doc_index.build_query()
.find(query=query, search_field='tensor', limit=5)
.filter(filter_query={'price': {'$neq': 5}})
.build()
)
docs, scores = doc_index.execute_query(q)
assert len(docs) == 4
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(fn, (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(fn, (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
|
from langchain_core.prompts.prompt import PromptTemplate
web_search_template = """Please write a passage to answer the question
Question: {QUESTION}
Passage:"""
web_search = PromptTemplate(template=web_search_template, input_variables=["QUESTION"])
sci_fact_template = """Please write a scientific paper passage to support/refute the claim
Claim: {Claim}
Passage:""" # noqa: E501
sci_fact = PromptTemplate(template=sci_fact_template, input_variables=["Claim"])
arguana_template = """Please write a counter argument for the passage
Passage: {PASSAGE}
Counter Argument:"""
arguana = PromptTemplate(template=arguana_template, input_variables=["PASSAGE"])
trec_covid_template = """Please write a scientific paper passage to answer the question
Question: {QUESTION}
Passage:"""
trec_covid = PromptTemplate(template=trec_covid_template, input_variables=["QUESTION"])
fiqa_template = """Please write a financial article passage to answer the question
Question: {QUESTION}
Passage:"""
fiqa = PromptTemplate(template=fiqa_template, input_variables=["QUESTION"])
dbpedia_entity_template = """Please write a passage to answer the question.
Question: {QUESTION}
Passage:"""
dbpedia_entity = PromptTemplate(
template=dbpedia_entity_template, input_variables=["QUESTION"]
)
trec_news_template = """Please write a news passage about the topic.
Topic: {TOPIC}
Passage:"""
trec_news = PromptTemplate(template=trec_news_template, input_variables=["TOPIC"])
mr_tydi_template = """Please write a passage in Swahili/Korean/Japanese/Bengali to answer the question in detail.
Question: {QUESTION}
Passage:""" # noqa: E501
mr_tydi = PromptTemplate(template=mr_tydi_template, input_variables=["QUESTION"])
PROMPT_MAP = {
"web_search": web_search,
"sci_fact": sci_fact,
"arguana": arguana,
"trec_covid": trec_covid,
"fiqa": fiqa,
"dbpedia_entity": dbpedia_entity,
"trec_news": trec_news,
"mr_tydi": mr_tydi,
}
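# A minimal usage sketch (illustrative, not part of the original module):
# each entry in PROMPT_MAP is a standard PromptTemplate, so a caller can
# render one with .format().
#
# prompt = PROMPT_MAP["web_search"]
# text = prompt.format(QUESTION="What causes the northern lights?")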
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
web_search_template = """Please write a passage to answer the question
Question: {QUESTION}
Passage:"""
web_search = PromptTemplate(template=web_search_template, input_variables=["QUESTION"])
sci_fact_template = """Please write a scientific paper passage to support/refute the claim
Claim: {Claim}
Passage:"""
sci_fact = PromptTemplate(template=sci_fact_template, input_variables=["Claim"])
arguana_template = """Please write a counter argument for the passage
Passage: {PASSAGE}
Counter Argument:"""
arguana = PromptTemplate(template=arguana_template, input_variables=["PASSAGE"])
trec_covid_template = """Please write a scientific paper passage to answer the question
Question: {QUESTION}
Passage:"""
trec_covid = PromptTemplate(template=trec_covid_template, input_variables=["QUESTION"])
fiqa_template = """Please write a financial article passage to answer the question
Question: {QUESTION}
Passage:"""
fiqa = PromptTemplate(template=fiqa_template, input_variables=["QUESTION"])
dbpedia_entity_template = """Please write a passage to answer the question.
Question: {QUESTION}
Passage:"""
dbpedia_entity = PromptTemplate(
template=dbpedia_entity_template, input_variables=["QUESTION"]
)
trec_news_template = """Please write a news passage about the topic.
Topic: {TOPIC}
Passage:"""
trec_news = PromptTemplate(template=trec_news_template, input_variables=["TOPIC"])
mr_tydi_template = """Please write a passage in Swahili/Korean/Japanese/Bengali to answer the question in detail.
Question: {QUESTION}
Passage:"""
mr_tydi = PromptTemplate(template=mr_tydi_template, input_variables=["QUESTION"])
PROMPT_MAP = {
"web_search": web_search,
"sci_fact": sci_fact,
"arguana": arguana,
"trec_covid": trec_covid,
"fiqa": fiqa,
"dbpedia_entity": dbpedia_entity,
"trec_news": trec_news,
"mr_tydi": mr_tydi,
}
|
"""
This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
and creates parallel-sentence TSV files that can be used to extend existing sentence embedding models to new languages.
The WikiMatrix project mined parallel sentences from Wikipedia in various languages.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import gzip
import os
import sentence_transformers.util
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
num_dev_sentences = 1000 # Number of sentences we want to use for development
threshold = 1.075 # Only use sentences with a LASER similarity score above the threshold
download_url = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/"
download_folder = "../datasets/WikiMatrix/"
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(download_folder), exist_ok=True)
os.makedirs(parallel_sentences_folder, exist_ok=True)
for source_lang in source_languages:
for target_lang in target_languages:
filename_train = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
filename_dev = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
if not os.path.exists(filename_train) and not os.path.exists(filename_dev):
langs_ordered = sorted([source_lang, target_lang])
wikimatrix_filename = "WikiMatrix.{}-{}.tsv.gz".format(*langs_ordered)
wikimatrix_filepath = os.path.join(download_folder, wikimatrix_filename)
if not os.path.exists(wikimatrix_filepath):
print("Download", download_url + wikimatrix_filename)
try:
sentence_transformers.util.http_get(download_url + wikimatrix_filename, wikimatrix_filepath)
except Exception:
print("Was not able to download", download_url + wikimatrix_filename)
continue
if not os.path.exists(wikimatrix_filepath):
continue
train_sentences = []
dev_sentences = []
dev_sentences_set = set()
extract_dev_sentences = True
with gzip.open(wikimatrix_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
score, sent1, sent2 = line.strip().split("\t")
sent1 = sent1.strip()
sent2 = sent2.strip()
score = float(score)
if score < threshold:
break  # WikiMatrix files are sorted by descending score, so we can stop early
if sent1 == sent2:
continue
if langs_ordered.index(source_lang) == 1: # Swap, so that src lang is sent1
sent1, sent2 = sent2, sent1
# Avoid duplicates in development set
if sent1 in dev_sentences_set or sent2 in dev_sentences_set:
continue
if extract_dev_sentences:
dev_sentences.append([sent1, sent2])
dev_sentences_set.add(sent1)
dev_sentences_set.add(sent2)
if len(dev_sentences) >= num_dev_sentences:
extract_dev_sentences = False
else:
train_sentences.append([sent1, sent2])
print("Write", len(dev_sentences), "dev sentences", filename_dev)
with gzip.open(filename_dev, "wt", encoding="utf8") as fOut:
for sents in dev_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("Write", len(train_sentences), "train sentences", filename_train)
with gzip.open(filename_train, "wt", encoding="utf8") as fOut:
for sents in train_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("---DONE---")
|
"""
This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
and creates parallel-sentence TSV files that can be used to extend existing sentence embedding models to new languages.
The WikiMatrix project mined parallel sentences from Wikipedia in various languages.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
num_dev_sentences = 1000 # Number of sentences we want to use for development
threshold = 1.075 # Only use sentences with a LASER similarity score above the threshold
download_url = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/"
download_folder = "../datasets/WikiMatrix/"
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(download_folder), exist_ok=True)
os.makedirs(parallel_sentences_folder, exist_ok=True)
for source_lang in source_languages:
for target_lang in target_languages:
filename_train = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
filename_dev = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
if not os.path.exists(filename_train) and not os.path.exists(filename_dev):
langs_ordered = sorted([source_lang, target_lang])
wikimatrix_filename = "WikiMatrix.{}-{}.tsv.gz".format(*langs_ordered)
wikimatrix_filepath = os.path.join(download_folder, wikimatrix_filename)
if not os.path.exists(wikimatrix_filepath):
print("Download", download_url + wikimatrix_filename)
try:
sentence_transformers.util.http_get(download_url + wikimatrix_filename, wikimatrix_filepath)
except Exception:
print("Was not able to download", download_url + wikimatrix_filename)
continue
if not os.path.exists(wikimatrix_filepath):
continue
train_sentences = []
dev_sentences = []
dev_sentences_set = set()
extract_dev_sentences = True
with gzip.open(wikimatrix_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
score, sent1, sent2 = line.strip().split("\t")
sent1 = sent1.strip()
sent2 = sent2.strip()
score = float(score)
if score < threshold:
break  # WikiMatrix files are sorted by descending score, so we can stop early
if sent1 == sent2:
continue
if langs_ordered.index(source_lang) == 1: # Swap, so that src lang is sent1
sent1, sent2 = sent2, sent1
# Avoid duplicates in development set
if sent1 in dev_sentences_set or sent2 in dev_sentences_set:
continue
if extract_dev_sentences:
dev_sentences.append([sent1, sent2])
dev_sentences_set.add(sent1)
dev_sentences_set.add(sent2)
if len(dev_sentences) >= num_dev_sentences:
extract_dev_sentences = False
else:
train_sentences.append([sent1, sent2])
print("Write", len(dev_sentences), "dev sentences", filename_dev)
with gzip.open(filename_dev, "wt", encoding="utf8") as fOut:
for sents in dev_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("Write", len(train_sentences), "train sentences", filename_train)
with gzip.open(filename_train, "wt", encoding="utf8") as fOut:
for sents in train_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("---DONE---")
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import CSRReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
def __init__(
self,
model: SparseEncoder,
beta: float = 0.1,
gamma: float = 1.0,
scale: float = 1.0,
similarity_fct=util.dot_score,
):
"""
CSRLoss implements a combined loss function for Contrastive Sparse Representation (CSR) models.
This loss combines two components:
1. A reconstruction loss :class:`CSRReconstructionLoss` that ensures the sparse representation can faithfully
reconstruct the original embedding.
2. A contrastive learning component :class:`SparseMultipleNegativesRankingLoss` that ensures semantically
similar sentences have similar representations.
The total loss is a linear combination of the two losses.
Args:
model: SparseEncoder model
beta: Weight for the L_aux component in the reconstruction loss. Default is 0.1.
gamma: Weight for the contrastive MRL loss component. Default is 1.0.
scale: Scale factor for the similarity scores in the MRL loss. Default is 1.0.
similarity_fct: Similarity function to use for the MRL loss. Default is dot product.
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. Sentence pairs or triplets for the MRL component
2. Uses autoencoder components of the SparseEncoder model
Relations:
- Uses :class:`CSRReconstructionLoss` for the reconstruction component
- Uses :class:`SparseMultipleNegativesRankingLoss` for the contrastive component
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("sentence-transformers/all-MiniLM-L6-v2")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.CSRLoss(model, beta=0.1, gamma=1.0, scale=20.0)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = CSRReconstructionLoss(model=model, beta=beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model=model, scale=scale, similarity_fct=similarity_fct)
def forward(
self, sentence_features: Iterable[dict[str, torch.Tensor]], labels: torch.Tensor = None
) -> dict[str, torch.Tensor]:
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sentence_embedding = [output["sentence_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sentence_embedding, labels)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta, "gamma": self.gamma, "scale": self.scale}
@property
def citation(self) -> str:
return """
@misc{wen2025matryoshkarevisitingsparsecoding,
title={Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation},
author={Tiansheng Wen and Yifei Wang and Zequn Zeng and Zhong Peng and Yudi Su and Xinyang Liu and Bo Chen and Hongwei Liu and Stefanie Jegelka and Chenyu You},
year={2025},
eprint={2503.01776},
archivePrefix={arXiv},
primaryClass={cs.LG},
url={https://arxiv.org/abs/2503.01776},
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import CSRReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
def __init__(self, model: SparseEncoder, beta: float = 0.1, gamma: float = 1.0, scale: float = 20.0):
"""
CSRLoss implements a combined loss function for Contrastive Sparse Representation (CSR) models.
This loss combines two components:
1. A reconstruction loss :class:`CSRReconstructionLoss` that ensures the sparse representation can faithfully
reconstruct the original embedding.
2. A contrastive learning component :class:`SparseMultipleNegativesRankingLoss` that ensures semantically
similar sentences have similar representations.
The total loss is a linear combination of the two losses.
Args:
model: SparseEncoder model
beta: Weight for the L_aux component in the reconstruction loss
gamma: Weight for the contrastive MRL loss component
scale: Scale factor for the similarity scores in the MRL loss
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. Sentence pairs or triplets for the MRL component
2. Uses autoencoder components of the SparseEncoder model
Relations:
- Uses :class:`CSRReconstructionLoss` for the reconstruction component
- Uses :class:`SparseMultipleNegativesRankingLoss` for the contrastive component
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("sentence-transformers/all-MiniLM-L6-v2")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.CSRLoss(model, beta=0.1, gamma=1.0, scale=20.0)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = CSRReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self, sentence_features: Iterable[dict[str, torch.Tensor]], labels: torch.Tensor = None
) -> dict[str, torch.Tensor]:
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sentence_embedding = [output["sentence_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sentence_embedding, labels)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta, "gamma": self.gamma, "scale": self.scale}
@property
def citation(self) -> str:
return """
@misc{wen2025matryoshkarevisitingsparsecoding,
title={Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation},
author={Tiansheng Wen and Yifei Wang and Zequn Zeng and Zhong Peng and Yudi Su and Xinyang Liu and Bo Chen and Hongwei Liu and Stefanie Jegelka and Chenyu You},
year={2025},
eprint={2503.01776},
archivePrefix={arXiv},
primaryClass={cs.LG},
url={https://arxiv.org/abs/2503.01776},
}
"""
|
"""Common test fixtures with proper setup and teardown."""
from contextlib import asynccontextmanager
from typing import AsyncGenerator
from unittest.mock import Mock, patch
import pytest
from prisma import Prisma
@pytest.fixture
async def test_db_connection() -> AsyncGenerator[Prisma, None]:
"""Provide a test database connection with proper cleanup.
This fixture ensures the database connection is properly
closed after the test, even if the test fails.
"""
db = Prisma()
try:
await db.connect()
yield db
finally:
await db.disconnect()
@pytest.fixture
def mock_transaction():
"""Mock database transaction with proper async context manager."""
@asynccontextmanager
async def mock_context(*args, **kwargs):
yield None
with patch("backend.data.db.locked_transaction", side_effect=mock_context) as mock:
yield mock
@pytest.fixture
def isolated_app_state():
"""Fixture that ensures app state is isolated between tests."""
# Example: Save original state
# from backend.server.app import app
# original_overrides = app.dependency_overrides.copy()
# try:
# yield app
# finally:
# # Restore original state
# app.dependency_overrides = original_overrides
# For now, just yield None as this is an example
yield None
@pytest.fixture
def cleanup_files():
"""Fixture to track and cleanup files created during tests."""
created_files = []
def track_file(filepath: str):
created_files.append(filepath)
yield track_file
# Cleanup
import os
for filepath in created_files:
try:
if os.path.exists(filepath):
os.remove(filepath)
except Exception as e:
print(f"Warning: Failed to cleanup {filepath}: {e}")
@pytest.fixture
async def async_mock_with_cleanup():
"""Create async mocks that are properly cleaned up."""
mocks = []
def create_mock(**kwargs):
mock = Mock(**kwargs)
mocks.append(mock)
return mock
yield create_mock
# Reset all mocks
for mock in mocks:
mock.reset_mock()
class TestDatabaseIsolation:
"""Example of proper test isolation with database operations."""
@pytest.fixture(autouse=True)
async def setup_and_teardown(self, test_db_connection):
"""Setup and teardown for each test method."""
# Setup: Clear test data
await test_db_connection.user.delete_many(
where={"email": {"contains": "@test.example"}}
)
yield
# Teardown: Clear test data again
await test_db_connection.user.delete_many(
where={"email": {"contains": "@test.example"}}
)
@pytest.fixture(scope="session")
async def test_create_user(self, test_db_connection):
"""Test that demonstrates proper isolation."""
# This test has access to a clean database
user = await test_db_connection.user.create(
data={
"id": "test-user-id",
"email": "test@test.example",
"name": "Test User",
}
)
assert user.email == "test@test.example"
# User will be cleaned up automatically
@pytest.fixture(scope="function") # Explicitly use function scope
def reset_singleton_state():
"""Reset singleton state between tests."""
# Example: Reset a singleton instance
# from backend.data.some_singleton import SingletonClass
# # Save original state
# original_instance = getattr(SingletonClass, "_instance", None)
# try:
# # Clear singleton
# SingletonClass._instance = None
# yield
# finally:
# # Restore original state
# SingletonClass._instance = original_instance
# For now, just yield None as this is an example
yield None
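# A minimal usage sketch (illustrative, not part of the original module):
# tests request these fixtures by name, e.g.
#
# async def test_example(test_db_connection, cleanup_files):
#     cleanup_files("/tmp/test-artifact.txt")  # tracked and removed afterwards
#     assert await test_db_connection.user.count() >= 0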
|
"""Common test fixtures with proper setup and teardown."""
from contextlib import asynccontextmanager
from typing import AsyncGenerator
from unittest.mock import Mock, patch
import pytest
from prisma import Prisma
@pytest.fixture
async def test_db_connection() -> AsyncGenerator[Prisma, None]:
"""Provide a test database connection with proper cleanup.
This fixture ensures the database connection is properly
closed after the test, even if the test fails.
"""
db = Prisma()
try:
await db.connect()
yield db
finally:
await db.disconnect()
@pytest.fixture
def mock_transaction():
"""Mock database transaction with proper async context manager."""
@asynccontextmanager
async def mock_context(*args, **kwargs):
yield None
with patch("backend.data.db.locked_transaction", side_effect=mock_context) as mock:
yield mock
@pytest.fixture
def isolated_app_state():
"""Fixture that ensures app state is isolated between tests."""
# Example: Save original state
# from backend.server.app import app
# original_overrides = app.dependency_overrides.copy()
# try:
# yield app
# finally:
# # Restore original state
# app.dependency_overrides = original_overrides
# For now, just yield None as this is an example
yield None
@pytest.fixture
def cleanup_files():
"""Fixture to track and cleanup files created during tests."""
created_files = []
def track_file(filepath: str):
created_files.append(filepath)
yield track_file
# Cleanup
import os
for filepath in created_files:
try:
if os.path.exists(filepath):
os.remove(filepath)
except Exception as e:
print(f"Warning: Failed to cleanup {filepath}: {e}")
@pytest.fixture
async def async_mock_with_cleanup():
"""Create async mocks that are properly cleaned up."""
mocks = []
def create_mock(**kwargs):
mock = Mock(**kwargs)
mocks.append(mock)
return mock
yield create_mock
# Reset all mocks
for mock in mocks:
mock.reset_mock()
class TestDatabaseIsolation:
"""Example of proper test isolation with database operations."""
@pytest.fixture(autouse=True)
async def setup_and_teardown(self, test_db_connection):
"""Setup and teardown for each test method."""
# Setup: Clear test data
await test_db_connection.user.delete_many(
where={"email": {"contains": "@test.example"}}
)
yield
# Teardown: Clear test data again
await test_db_connection.user.delete_many(
where={"email": {"contains": "@test.example"}}
)
async def test_create_user(self, test_db_connection):
"""Test that demonstrates proper isolation."""
# This test has access to a clean database
user = await test_db_connection.user.create(
data={"email": "test@test.example", "name": "Test User"}
)
assert user.email == "test@test.example"
# User will be cleaned up automatically
@pytest.fixture(scope="function") # Explicitly use function scope
def reset_singleton_state():
"""Reset singleton state between tests."""
# Example: Reset a singleton instance
# from backend.data.some_singleton import SingletonClass
# # Save original state
# original_instance = getattr(SingletonClass, "_instance", None)
# try:
# # Clear singleton
# SingletonClass._instance = None
# yield
# finally:
# # Restore original state
# SingletonClass._instance = original_instance
# For now, just yield None as this is an example
yield None
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
from typing import Annotated, Union
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: Annotated[Union[str, None], Query(min_length=3)]):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
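# Note (added for clarity): because the parameter declares no default value,
# q is required even though it is typed Union[str, None]; appending "= None"
# to the parameter would make it optional.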
|
from typing import Annotated, Union
from fastapi import FastAPI, Query
app = FastAPI()
@app.get("/items/")
async def read_items(q: Annotated[Union[str, None], Query(min_length=3)] = ...):
results = {"items": [{"item_id": "Foo"}, {"item_id": "Bar"}]}
if q:
results.update({"q": q})
return results
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
# Note: this transform assumes that the input to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
T.ToPureTensor(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
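# A minimal usage sketch (illustrative, assuming the torchvision references
# setup and hypothetical pil_image/boxes/labels inputs):
#
# train_preset = DetectionPresetTrain(data_augmentation="hflip", backend="pil")
# img, target = train_preset(pil_image, {"boxes": boxes, "labels": labels})
# eval_preset = DetectionPresetEval(backend="pil")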
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
# Note: this transform assumes that the input to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ConvertImageDtype(torch.float)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
from __future__ import annotations
from typing import Any
import torch
from torch import nn
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer
# TODO: Check the tokenizer problem and whether more needs to be implemented, as in the Transformer class
class MLMTransformer(nn.Module):
"""A minimal Transformer model that uses MLM (Masked Language Modeling).
This model implements only the essential functionality needed for MLM,
without inheriting from the base Transformer class.
Args:
model_name_or_path: Hugging Face model name
max_seq_length: Truncate any inputs longer than max_seq_length
model_args: Keyword arguments passed to the Hugging Face Transformers model
tokenizer_args: Keyword arguments passed to the Hugging Face Transformers tokenizer
config_args: Keyword arguments passed to the Hugging Face Transformers config
cache_dir: Cache dir for Hugging Face Transformers to store/load models
do_lower_case: If true, lowercases the input
tokenizer_name_or_path: Name or path of the tokenizer
"""
def __init__(
self,
model_name_or_path: str,
max_seq_length: int | None = None,
model_args: dict[str, Any] | None = None,
tokenizer_args: dict[str, Any] | None = None,
config_args: dict[str, Any] | None = None,
cache_dir: str | None = None,
do_lower_case: bool = False,
tokenizer_name_or_path: str | None = None,
) -> None:
super().__init__()
# Set default values for optional arguments
if model_args is None:
model_args = {}
if tokenizer_args is None:
tokenizer_args = {}
if config_args is None:
config_args = {}
# Load config
self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir, **config_args)
# Load tokenizer
if max_seq_length is not None and "model_max_length" not in tokenizer_args:
tokenizer_args["model_max_length"] = max_seq_length
self.tokenizer = AutoTokenizer.from_pretrained(
(tokenizer_name_or_path if tokenizer_name_or_path is not None else model_name_or_path),
cache_dir=cache_dir,
**tokenizer_args,
)
# Set max_seq_length
self.max_seq_length = max_seq_length
if max_seq_length is None:
if hasattr(self.config, "max_position_embeddings") and hasattr(self.tokenizer, "model_max_length"):
self.max_seq_length = min(self.config.max_position_embeddings, self.tokenizer.model_max_length)
# Load MLM model
self.auto_model = AutoModelForMaskedLM.from_pretrained(
model_name_or_path, config=self.config, cache_dir=cache_dir, **model_args
)
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features
Returns:
Dictionary containing token embeddings and MLM logits
"""
# Get MLM outputs
mlm_outputs = self.auto_model(**features)
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = mlm_outputs.logits
return {"mlm_logits": mlm_logits}
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the token embeddings"""
return self.auto_model.config.hidden_size
def tokenize(self, texts: list[str], padding: bool = True) -> dict[str, torch.Tensor]:
"""Tokenize the input texts.
Args:
texts: List of texts to tokenize
padding: Whether to pad the sequences
Returns:
Dictionary containing tokenized inputs
"""
# Check if the model is a DistilBERT model
is_distilbert = "distilbert" in self.auto_model.config.model_type.lower()
# For DistilBERT models, we need to exclude token_type_ids
if is_distilbert:
return self.tokenizer(
texts,
padding=padding,
truncation=True,
max_length=self.max_seq_length,
return_tensors="pt",
return_token_type_ids=False, # Exclude token_type_ids for DistilBERT
)
else:
return self.tokenizer(
texts,
padding=padding,
truncation=True,
max_length=self.max_seq_length,
return_tensors="pt",
)
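# A minimal usage sketch (illustrative, not part of the original module):
#
# mlm = MLMTransformer("distilbert-base-uncased", max_seq_length=128)
# features = mlm.tokenize(["Paris is the [MASK] of France."])
# logits = mlm(features)["mlm_logits"]  # shape: (batch, seq_len, vocab_size)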
|
from __future__ import annotations
from typing import Any
import torch
from torch import nn
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer
class MLMTransformer(nn.Module):
"""A minimal Transformer model that uses MLM (Masked Language Modeling).
This model implements only the essential functionality needed for MLM,
without inheriting from the base Transformer class.
Args:
model_name_or_path: Hugging Face model name
max_seq_length: Truncate any inputs longer than max_seq_length
model_args: Keyword arguments passed to the Hugging Face Transformers model
tokenizer_args: Keyword arguments passed to the Hugging Face Transformers tokenizer
config_args: Keyword arguments passed to the Hugging Face Transformers config
cache_dir: Cache dir for Hugging Face Transformers to store/load models
do_lower_case: If true, lowercases the input
tokenizer_name_or_path: Name or path of the tokenizer
"""
def __init__(
self,
model_name_or_path: str,
max_seq_length: int | None = None,
model_args: dict[str, Any] | None = None,
tokenizer_args: dict[str, Any] | None = None,
config_args: dict[str, Any] | None = None,
cache_dir: str | None = None,
do_lower_case: bool = False,
tokenizer_name_or_path: str | None = None,
) -> None:
super().__init__()
# Set default values for optional arguments
if model_args is None:
model_args = {}
if tokenizer_args is None:
tokenizer_args = {}
if config_args is None:
config_args = {}
# Load config
self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir, **config_args)
# Load tokenizer
if max_seq_length is not None and "model_max_length" not in tokenizer_args:
tokenizer_args["model_max_length"] = max_seq_length
self.tokenizer = AutoTokenizer.from_pretrained(
(tokenizer_name_or_path if tokenizer_name_or_path is not None else model_name_or_path),
cache_dir=cache_dir,
**tokenizer_args,
)
# Set max_seq_length
self.max_seq_length = max_seq_length
if max_seq_length is None:
if hasattr(self.config, "max_position_embeddings") and hasattr(self.tokenizer, "model_max_length"):
self.max_seq_length = min(self.config.max_position_embeddings, self.tokenizer.model_max_length)
# Load MLM model
self.auto_model = AutoModelForMaskedLM.from_pretrained(
model_name_or_path, config=self.config, cache_dir=cache_dir, **model_args
)
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features
Returns:
Dictionary containing token embeddings and MLM logits
"""
# Get MLM outputs
mlm_outputs = self.auto_model(**features)
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = mlm_outputs.logits
return {"mlm_logits": mlm_logits}
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the token embeddings"""
return self.auto_model.config.hidden_size
def tokenize(self, texts: list[str], padding: bool = True) -> dict[str, torch.Tensor]:
"""Tokenize the input texts.
Args:
texts: List of texts to tokenize
padding: Whether to pad the sequences
Returns:
Dictionary containing tokenized inputs
"""
# Check if the model is a DistilBERT model
is_distilbert = "distilbert" in self.auto_model.config.model_type.lower()
# For DistilBERT models, we need to exclude token_type_ids
if is_distilbert:
return self.tokenizer(
texts,
padding=padding,
truncation=True,
max_length=self.max_seq_length,
return_tensors="pt",
return_token_type_ids=False, # Exclude token_type_ids for DistilBERT
)
else:
return self.tokenizer(
texts,
padding=padding,
truncation=True,
max_length=self.max_seq_length,
return_tensors="pt",
)
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {
"model": "accounts/fireworks/models/llama-v3p1-70b-instruct",
"api_key": "test_api_key",
}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"FIREWORKS_API_KEY": "api_key",
"FIREWORKS_API_BASE": "https://base.com",
},
{
"model": "accounts/fireworks/models/llama-v3p1-70b-instruct",
},
{
"fireworks_api_key": "api_key",
"fireworks_api_base": "https://base.com",
},
)
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {"api_key": "test_api_key"}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"FIREWORKS_API_KEY": "api_key",
"FIREWORKS_API_BASE": "https://base.com",
},
{},
{
"fireworks_api_key": "api_key",
"fireworks_api_base": "https://base.com",
},
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
@HEADS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_convs (int, optional): number of convolutional layers in GlbCtxHead.
Default: 4.
in_channels (int, optional): number of input channels. Default: 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Default: 256.
num_classes (int, optional): number of classes. Default: 80.
loss_weight (float, optional): global context loss weight. Default: 1.
conv_cfg (dict, optional): config to init conv layer. Default: None.
norm_cfg (dict, optional): config to init norm layer. Default: None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_weight=1.0,
conv_cfg=None,
norm_cfg=None,
conv_to_res=False,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc'))):
super(GlobalContextHead, self).__init__(init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
@auto_fp16()
def forward(self, feats):
"""Forward function."""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
@force_fp32(apply_to=('pred', ))
def loss(self, pred, labels):
"""Loss function."""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
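# Hedged sketch (illustration only): how `loss` above builds its multi-hot
# targets. Each image contributes the set of unique GT class labels, and the
# target entries at those indices are set to 1. Shapes are toy assumptions.
if __name__ == "__main__":
    import torch

    num_classes = 5
    pred = torch.zeros(2, num_classes)  # stand-in for the fc logits
    labels = [torch.tensor([0, 2, 2]), torch.tensor([4])]
    targets = pred.new_zeros(pred.size())
    for i, label in enumerate(labels):
        targets[i, label.unique()] = 1.0
    print(targets)  # image 0 -> classes {0, 2}; image 1 -> class {4}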
|
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
@HEADS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
        num_convs (int, optional): number of convolutional layers in GlbCtxHead.
Default: 4.
in_channels (int, optional): number of input channels. Default: 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Default: 256.
num_classes (int, optional): number of classes. Default: 80.
loss_weight (float, optional): global context loss weight. Default: 1.
conv_cfg (dict, optional): config to init conv layer. Default: None.
norm_cfg (dict, optional): config to init norm layer. Default: None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_weight=1.0,
conv_cfg=None,
norm_cfg=None,
conv_to_res=False,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc'))):
super(GlobalContextHead, self).__init__(init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
@auto_fp16()
def forward(self, feats):
"""Forward function."""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
@force_fp32(apply_to=('pred', ))
def loss(self, pred, labels):
"""Loss function."""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
|
import numpy as np
import pytest
from keras.src import layers
from keras.src.testing import test_case
class ActivityRegularizationTest(test_case.TestCase):
def test_correctness(self):
layer = layers.ActivityRegularization(l1=0.2, l2=0.3)
layer(2 * np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 4 * 0.3 + 2 * 0.2)
@pytest.mark.requires_trainable_backend
def test_activity_regularization_basics(self):
self.run_layer_test(
layers.ActivityRegularization,
{"l1": 0.1, "l2": 0.2},
input_shape=(2, 3),
input_dtype="float32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=True,
assert_built_after_instantiation=True,
)
|
import numpy as np
import pytest
from keras.src import layers
from keras.src.testing import test_case
class ActivityRegularizationTest(test_case.TestCase):
def test_correctness(self):
layer = layers.ActivityRegularization(l1=0.2, l2=0.3)
layer(2 * np.ones((1,)))
self.assertLen(layer.losses, 1)
self.assertAllClose(layer.losses[0], 4 * 0.3 + 2 * 0.2)
@pytest.mark.requires_trainable_backend
def test_activity_regularization_basics(self):
self.run_layer_test(
layers.ActivityRegularization,
{"l1": 0.1, "l2": 0.2},
input_shape=(2, 3),
input_dtype="float32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_patch.time += 1
return time_patch.time
class TestIterTimerHook(TestCase):
def setUp(self) -> None:
self.hook = IterTimerHook()
def test_init(self):
assert self.hook.time_sec_tot == 0
assert self.hook.start_iter == 0
def test_before_run(self):
runner = MagicMock()
runner.iter = 1
self.hook.before_run(runner)
assert self.hook.start_iter == 1
def test_before_epoch(self):
runner = Mock()
self.hook._before_epoch(runner)
assert isinstance(self.hook.t, float)
@patch('time.time', MagicMock(return_value=1))
def test_before_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
self.hook._before_epoch(runner)
for mode in ('train', 'val', 'test'):
self.hook._before_iter(runner, batch_idx=1, mode=mode)
runner.message_hub.update_scalar.assert_called_with(
f'{mode}/data_time', 0)
@patch('time.time', time_patch)
def test_after_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
runner.log_processor.window_size = 10
runner.train_loop.max_iters = 100
runner.iter = 0
runner.test_loop.dataloader = [0] * 20
runner.val_loop.dataloader = [0] * 20
self.hook._before_epoch(runner)
self.hook.before_run(runner)
self.hook._after_iter(runner, batch_idx=1)
runner.message_hub.update_scalar.assert_called()
runner.message_hub.get_log.assert_not_called()
runner.message_hub.update_info.assert_not_called()
runner.message_hub = MessageHub.get_instance('test_iter_timer_hook')
runner.iter = 9
# eta = (100 - 10) / 1
self.hook._after_iter(runner, batch_idx=89)
assert runner.message_hub.get_info('eta') == 90
self.hook._after_iter(runner, batch_idx=9, mode='val')
assert runner.message_hub.get_info('eta') == 10
self.hook._after_iter(runner, batch_idx=19, mode='test')
assert runner.message_hub.get_info('eta') == 0
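# Hedged note (illustration only): the ETA assertions above follow
# eta = iters_left * avg_sec_per_iter. The patched clock advances one second
# per call, so avg_sec_per_iter is 1 and after finishing iteration 10 of 100
# the expected train ETA is (100 - 10) * 1 = 90 seconds.
if __name__ == "__main__":
    max_iters, finished_iters, sec_per_iter = 100, 10, 1.0
    assert (max_iters - finished_iters) * sec_per_iter == 90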
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
hook = IterTimerHook()
runner = Mock()
hook._before_epoch(runner)
assert isinstance(hook.t, float)
def test_before_iter(self):
hook = IterTimerHook()
runner = Mock()
runner.log_buffer = dict()
hook._before_epoch(runner)
hook._before_iter(runner, 0)
runner.message_hub.update_scalar.assert_called()
def test_after_iter(self):
hook = IterTimerHook()
runner = Mock()
runner.log_buffer = dict()
hook._before_epoch(runner)
hook._after_iter(runner, 0)
runner.message_hub.update_scalar.assert_called()
|
from unittest.mock import patch
import pytest
from llama_index.utils.workflow import (
draw_all_possible_flows,
draw_most_recent_execution,
)
@pytest.mark.asyncio
async def test_workflow_draw_methods(workflow):
with patch("pyvis.network.Network") as mock_network:
draw_all_possible_flows(workflow, filename="test_all_flows.html")
mock_network.assert_called_once()
mock_network.return_value.show.assert_called_once_with(
"test_all_flows.html", notebook=False
)
await workflow.run()
with patch("pyvis.network.Network") as mock_network:
draw_most_recent_execution(workflow, filename="test_recent_execution.html")
mock_network.assert_called_once()
mock_network.return_value.show.assert_called_once_with(
"test_recent_execution.html", notebook=False
)
|
from unittest.mock import patch
import pytest
from llama_index.utils.workflow import (
draw_all_possible_flows,
draw_most_recent_execution,
)
@pytest.mark.asyncio()
async def test_workflow_draw_methods(workflow):
with patch("pyvis.network.Network") as mock_network:
draw_all_possible_flows(workflow, filename="test_all_flows.html")
mock_network.assert_called_once()
mock_network.return_value.show.assert_called_once_with(
"test_all_flows.html", notebook=False
)
await workflow.run()
with patch("pyvis.network.Network") as mock_network:
draw_most_recent_execution(workflow, filename="test_recent_execution.html")
mock_network.assert_called_once()
mock_network.return_value.show.assert_called_once_with(
"test_recent_execution.html", notebook=False
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmdet.registry import MODELS
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
@MODELS.register_module()
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Args:
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
beta (float, optional): The loss is a piecewise function of prediction
and target. ``beta`` serves as a threshold for the difference
between the prediction and target. Defaults to 1.0.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
weight (torch.Tensor, optional): Sample-wise loss weight with
shape (N, ).
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
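# Hedged numerical check (illustration only): the piecewise loss above is
# continuous at |pred - target| == beta. With b = e^(gamma / alpha) - 1, the
# inner branch evaluated at diff == beta equals the outer branch.
if __name__ == "__main__":
    alpha, gamma, beta = 0.5, 1.5, 1.0
    b = np.e**(gamma / alpha) - 1
    diff = beta
    inner = alpha / b * (b * diff + 1) * np.log(b * diff / beta + 1) - alpha * diff
    outer = gamma * diff + gamma / b - alpha * beta
    assert np.isclose(inner, outer)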
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Args:
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
beta (float, optional): The loss is a piecewise function of prediction
and target. ``beta`` serves as a threshold for the difference
between the prediction and target. Defaults to 1.0.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
weight (torch.Tensor, optional): Sample-wise loss weight with
shape (N, ).
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
Args:
word_embedding_dimension: Dimension of the word embeddings (vocab size)
pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
"""
SPLADE_POOLING_MODES = ("sum", "max")
def __init__(self, pooling_strategy: str = "max") -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.config_keys = ["pooling_strategy"]
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""FForward pass of the model.
Args:
features: Dictionary containing input features with 'mlm_logits' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["mlm_logits"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
return {"sentence_embedding": pooled_scores}
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
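# Hedged usage sketch (illustration only): random tensors stand in for real
# MLM-head logits; the shapes (batch=2, seq=4, vocab=10) are toy assumptions.
if __name__ == "__main__":
    fake_logits = torch.randn(2, 4, 10)
    pooling = SpladePooling(pooling_strategy="max")
    out = pooling({"mlm_logits": fake_logits})
    assert out["sentence_embedding"].shape == (2, 10)
    # log1p(relu(x)) is non-negative, so many entries are exactly zero
    assert (out["sentence_embedding"] >= 0).all()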
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
Args:
word_embedding_dimension: Dimension of the word embeddings (vocab size)
pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
"""
SPLADE_POOLING_MODES = (
"sum",
"max",
)
def __init__(
self,
pooling_strategy: str = "max",
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.config_keys = ["pooling_strategy"]
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the mofrom ...models.Pooling import Pooling
del.
Args:
features: Dictionary containing input features with 'mlm_logits' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["mlm_logits"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
return {"sentence_embedding": pooled_scores}
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
|
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress, array_cls):
tmp_file = os.path.join(tmp_path, 'test')
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('to_doc_vec', [True, False])
def test_array_save_load_binary_streaming(
protocol, compress, tmp_path, show_progress, to_doc_vec
):
tmp_file = os.path.join(tmp_path, 'test')
array_cls = DocVec if to_doc_vec else DocList
da = DocList[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
if to_doc_vec:
da = da.to_doc_vec()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da_after = array_cls[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_after):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
assert i == 99
|
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle', 'json-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocList[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc]()
da_generator = DocList[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './cascade-mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='PAA',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='PAAHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PAA',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='PAAHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .ld_head import LDHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead'
]
|
import threading
import fsspec.asyn
import torch
from ...iterable_dataset import IterableDataset, _apply_feature_types_on_example
from ...utils.logging import get_logger
logger = get_logger(__name__)
def _set_fsspec_for_multiprocess() -> None:
"""
Clear reference to the loop and thread.
    This is necessary; otherwise, HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
class TorchIterableDataset(IterableDataset, torch.utils.data.IterableDataset):
def __iter__(self):
        # fix for fsspec when using multiprocessing
_set_fsspec_for_multiprocess()
worker_info = torch.utils.data.get_worker_info()
if worker_info is None: # single-process data loading, return the full iterator
yield from IterableDataset.__iter__(self)
else: # in a worker process
# check if there aren't too many workers
if worker_info.id == 0 and self.n_shards < worker_info.num_workers:
logger.warning(
f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={self.n_shards}). "
f"Stopping dataloader workers [{self.n_shards}...{worker_info.num_workers -1}]."
)
logger.warning(
f"To parallelize data loading, we give each process some shards (or data sources) to process. "
f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={self.n_shards}."
f"To enable more parallelism, please split the dataset in more files than {self.n_shards}."
)
# split workload
shards_indices = list(range(worker_info.id, self.n_shards, worker_info.num_workers))
if shards_indices:
logger.debug(
f"dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{self.n_shards} shards."
)
for shard_idx in shards_indices:
for key, example in self._iter_shard(shard_idx):
if self.features:
yield _apply_feature_types_on_example(
example, self.features, token_per_repo_id=self._token_per_repo_id
)
else:
yield example
logger.debug(
f"dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{self.n_shards} shards."
)
else:
logger.debug(
f"dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({self.n_shards}<{worker_info.num_workers})."
)
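# Hedged sketch (illustration only): the round-robin shard split used above.
# Worker w receives shards range(w, n_shards, num_workers); any worker with
# w >= n_shards receives nothing and stops immediately.
if __name__ == "__main__":
    n_shards, num_workers = 5, 3
    for worker_id in range(num_workers):
        print(worker_id, list(range(worker_id, n_shards, num_workers)))
    # 0 -> [0, 3], 1 -> [1, 4], 2 -> [2]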
|
import threading
import fsspec.asyn
import torch
from ...iterable_dataset import IterableDataset, _apply_feature_types
from ...utils.logging import get_logger
logger = get_logger(__name__)
def _set_fsspec_for_multiprocess() -> None:
"""
Clear reference to the loop and thread.
    This is necessary; otherwise, HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
class TorchIterableDataset(IterableDataset, torch.utils.data.IterableDataset):
def __iter__(self):
        # fix for fsspec when using multiprocessing
_set_fsspec_for_multiprocess()
worker_info = torch.utils.data.get_worker_info()
if worker_info is None: # single-process data loading, return the full iterator
yield from IterableDataset.__iter__(self)
else: # in a worker process
# check if there aren't too many workers
if worker_info.id == 0 and self.n_shards < worker_info.num_workers:
logger.warning(
f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={self.n_shards}). "
f"Stopping dataloader workers [{self.n_shards}...{worker_info.num_workers -1}]."
)
logger.warning(
f"To parallelize data loading, we give each process some shards (or data sources) to process. "
f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={self.n_shards}."
f"To enable more parallelism, please split the dataset in more files than {self.n_shards}."
)
# split workload
shards_indices = list(range(worker_info.id, self.n_shards, worker_info.num_workers))
if shards_indices:
logger.debug(
f"dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{self.n_shards} shards."
)
for shard_idx in shards_indices:
for key, example in self._iter_shard(shard_idx):
if self.features:
yield _apply_feature_types(
example, self.features, token_per_repo_id=self._token_per_repo_id
)
else:
yield example
logger.debug(
f"dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{self.n_shards} shards."
)
else:
logger.debug(
f"dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({self.n_shards}<{worker_info.num_workers})."
)
|
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
        except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
@staticmethod
def _parse_index_ids_from_bulk_info(
accumulated_info: List[Dict],
) -> Dict[str, List[int]]:
"""Parse ids from bulk info of failed send request to ES operation
:param accumulated_info: accumulated info of failed operation
:return: dict containing failed index ids of each operation type
"""
parsed_ids = {}
for info in accumulated_info:
for _op_type in info.keys():
if '_id' in info[_op_type]:
if _op_type not in parsed_ids:
parsed_ids[_op_type] = []
parsed_ids[_op_type].append(info[_op_type]['_id'])
return parsed_ids
def _upload_batch(self, docs: Iterable['Document'], **kwargs) -> List[int]:
requests = [self._document_to_elastic(doc) for doc in docs]
accumulated_info = self._send_requests(requests, **kwargs)
self._refresh(self._config.index_name)
successful_ids = self._parse_index_ids_from_bulk_info(accumulated_info)
if 'index' not in successful_ids:
return []
return successful_ids['index']
def _extend(self, docs: Iterable['Document'], **kwargs):
docs = list(docs)
successful_indexed_ids = self._upload_batch(docs, **kwargs)
self._offset2ids.extend(
[_id for _id in successful_indexed_ids if _id not in self._offset2ids.ids]
)
if len(successful_indexed_ids) != len(docs):
doc_ids = [doc.id for doc in docs]
failed_index_ids = set(doc_ids) - set(successful_indexed_ids)
            err_msg = f'failed to add Documents with ids: {failed_index_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
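# Hedged sketch (illustration only): the shape of the bulk info consumed by
# `_parse_index_ids_from_bulk_info`. The payload below is a toy assumption
# modelled on Elasticsearch bulk item responses, not a captured response.
if __name__ == "__main__":
    info = [
        {'index': {'_id': 'doc-1', 'status': 201}},
        {'index': {'_id': 'doc-2', 'status': 201}},
        {'delete': {'_id': 'doc-3', 'status': 200}},
    ]
    print(SequenceLikeMixin._parse_index_ids_from_bulk_info(info))
    # {'index': ['doc-1', 'doc-2'], 'delete': ['doc-3']}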
|
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
        except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
@staticmethod
def _parse_index_ids_from_bulk_info(
accumulated_info: List[Dict],
) -> Dict[str, List[int]]:
"""Parse ids from bulk info of failed send request to ES operation
:param accumulated_info: accumulated info of failed operation
:return: dict containing failed index ids of each operation type
"""
parsed_ids = {}
for info in accumulated_info:
for _op_type in info.keys():
if '_id' in info[_op_type]:
if _op_type not in parsed_ids:
parsed_ids[_op_type] = []
parsed_ids[_op_type].append(info[_op_type]['_id'])
return parsed_ids
def _upload_batch(self, docs: Iterable['Document'], **kwargs) -> List[int]:
requests = [self._document_to_elastic(doc) for doc in docs]
accumulated_info = self._send_requests(requests, **kwargs)
self._refresh(self._config.index_name)
successful_ids = self._parse_index_ids_from_bulk_info(accumulated_info)
if 'index' not in successful_ids:
return []
return successful_ids['index']
def extend(self, docs: Iterable['Document'], **kwargs):
docs = list(docs)
successful_indexed_ids = self._upload_batch(docs, **kwargs)
self._offset2ids.extend(
[_id for _id in successful_indexed_ids if _id not in self._offset2ids.ids]
)
if len(successful_indexed_ids) != len(docs):
doc_ids = [doc.id for doc in docs]
failed_index_ids = set(doc_ids) - set(successful_indexed_ids)
            err_msg = f'failed to add Documents with ids: {failed_index_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 indicating the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
from zipfile import ZipFile
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
    with ZipFile(zip_save_path, "r") as zip_file:
        zip_file.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
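# Hedged usage sketch (illustration only): after training, the cross-encoder
# scores sentence pairs directly; the question texts below are made up.
scores = model.predict(
    [
        ["How do I learn Python?", "What is the best way to learn Python?"],
        ["How do I learn Python?", "How do I bake bread?"],
    ]
)
print(scores)  # higher score -> more likely duplicates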
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 indicating the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
from zipfile import ZipFile
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
dataset_path = "quora-dataset/"
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = "quora-IR-dataset.zip"
util.http_get(url="https://sbert.net/datasets/quora-IR-dataset.zip", path=zip_save_path)
    with ZipFile(zip_save_path, "r") as zip_file:
        zip_file.extractall(dataset_path)
# Read the quora dataset split for classification
logger.info("Read train dataset")
train_samples = []
with open(os.path.join(dataset_path, "classification", "train_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
train_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
train_samples.append(InputExample(texts=[row["question2"], row["question1"]], label=int(row["is_duplicate"])))
logger.info("Read dev dataset")
dev_samples = []
with open(os.path.join(dataset_path, "classification", "dev_pairs.tsv"), "r", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
dev_samples.append(InputExample(texts=[row["question1"], row["question2"]], label=int(row["is_duplicate"])))
# Configuration
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base with a single label, i.e., it will output a value between 0 and 1 indicating the similarity of the two questions
model = CrossEncoder("distilroberta-base", num_labels=1)
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples, name="Quora-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=5000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
action='store_true',
help='resume from the latest checkpoint in the work_dir automatically')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
if args.resume:
cfg.resume = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
    runner.train()


if __name__ == '__main__':
main()
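# A minimal sketch of how this entry point is typically invoked (the config
# path, work dir, and override keys below are assumptions, not part of this
# script):
#
#   python train.py configs/retinanet/retinanet_r50_fpn_1x_coco.py \
#       --work-dir ./work_dirs/retinanet_demo --amp \
#       --cfg-options train_dataloader.batch_size=4 optim_wrapper.optimizer.lr=0.01
#
# Multi-GPU runs select a launcher, e.g. `--launcher pytorch` under
# `torch.distributed` (usually via a dist_train.sh wrapper in MMDetection).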
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp

from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner

from mmdet.utils import register_all_modules


def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
action='store_true',
help='resume from the latest checkpoint in the work_dir automatically')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
cfg.resume = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
    runner.train()


if __name__ == '__main__':
main()
|
"""Utilities for environment variables."""
from __future__ import annotations
import os
from typing import Any, Optional, Union
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
    )


def get_from_dict_or_env(
data: dict[str, Any],
key: Union[str, list[str]],
env_key: str,
default: Optional[str] = None,
) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
data: The dictionary to look up the key in.
key: The key to look up in the dictionary. This can be a list of keys to try
in order.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
"""
if isinstance(key, (list, tuple)):
for k in key:
if k in data and data[k]:
return data[k]
if isinstance(key, str) and key in data and data[key]:
return data[key]
key_for_err = key[0] if isinstance(key, (list, tuple)) else key
    return get_from_env(key_for_err, env_key, default=default)


def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
key: The key to look up in the dictionary.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
Returns:
str: The value of the key.
Raises:
ValueError: If the key is not in the dictionary and no default value is
provided or if the environment variable is not set.
"""
if env_key in os.environ and os.environ[env_key]:
return os.environ[env_key]
elif default is not None:
return default
else:
msg = (
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)
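# A minimal usage sketch (the dict contents, key, and variable name below are
# assumed for illustration):
#
#   import os
#   os.environ["MY_SERVICE_API_KEY"] = "dummy-key"
#   env_var_is_set("MY_SERVICE_API_KEY")                      # -> True
#   get_from_dict_or_env({}, "my_service_api_key",
#                        "MY_SERVICE_API_KEY")                # -> "dummy-key"
#   get_from_dict_or_env({"my_service_api_key": "explicit"},
#                        "my_service_api_key",
#                        "MY_SERVICE_API_KEY")                # -> "explicit"
#
# With neither a dict entry nor the env var set and no default, get_from_env
# raises ValueError with a message naming the missing key.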
|
from __future__ import annotations

import os
from typing import Any, Optional, Union


def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set.
Args:
env_var (str): The name of the environment variable.
Returns:
bool: True if the environment variable is set, False otherwise.
"""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
    )


def get_from_dict_or_env(
data: dict[str, Any],
key: Union[str, list[str]],
env_key: str,
default: Optional[str] = None,
) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
data: The dictionary to look up the key in.
key: The key to look up in the dictionary. This can be a list of keys to try
in order.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
"""
if isinstance(key, (list, tuple)):
for k in key:
if k in data and data[k]:
return data[k]
if isinstance(key, str) and key in data and data[key]:
return data[key]
key_for_err = key[0] if isinstance(key, (list, tuple)) else key
    return get_from_env(key_for_err, env_key, default=default)


def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:
"""Get a value from a dictionary or an environment variable.
Args:
key: The key to look up in the dictionary.
env_key: The environment variable to look up if the key is not
in the dictionary.
default: The default value to return if the key is not in the dictionary
or the environment. Defaults to None.
Returns:
str: The value of the key.
Raises:
ValueError: If the key is not in the dictionary and no default value is
provided or if the environment variable is not set.
"""
if env_key in os.environ and os.environ[env_key]:
return os.environ[env_key]
elif default is not None:
return default
else:
msg = (
f"Did not find {key}, please add an environment variable"
f" `{env_key}` which contains it, or pass"
f" `{key}` as a named parameter."
)
raise ValueError(msg)
|