"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms in the tightly-knit world of scientific Python
# packages (numpy, scipy, matplotlib).
#
# It aims to provide simple and efficient solutions to learning problems
# that are accessible to everybody and reusable in various contexts:
# machine-learning as a versatile tool for science and engineering.
#
# See https://scikit-learn.org for complete documentation.
import importlib as _importlib
import logging
import os
import random
from ._config import config_context, get_config, set_config
logger = logging.getLogger(__name__)
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y.0 # For first release after an increment in Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.Y.ZaN # Alpha release
# X.Y.ZbN # Beta release
# X.Y.ZrcN # Release Candidate
# X.Y.Z # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "1.8.dev0"
# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
# simultaneously. This can happen for instance when calling BLAS inside a
# prange. Setting the following environment variable allows multiple OpenMP
# libraries to be loaded. It should not degrade performance since we manually
# take care of potential over-subscription performance issues, in sections of
# the code where nested OpenMP loops can happen, by dynamically reconfiguring
# the inner OpenMP runtime to temporarily disable it while under the scope of
# the outer OpenMP parallel section.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
# `_distributor_init` allows distributors to run custom init code.
# For instance, for the Windows wheel, this is used to pre-load the
# vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
# sub-folder.
# It is necessary to do this prior to importing show_versions as the
# latter is linked to the OpenMP runtime to make it possible to introspect
# it, and importing it first would fail if the OpenMP DLL cannot be found.
from . import ( # noqa: F401 E402
__check_build,
_distributor_init,
)
from .base import clone # noqa: E402
from .utils._show_versions import show_versions # noqa: E402
_submodules = [
"calibration",
"cluster",
"covariance",
"cross_decomposition",
"datasets",
"decomposition",
"dummy",
"ensemble",
"exceptions",
"experimental",
"externals",
"feature_extraction",
"feature_selection",
"frozen",
"gaussian_process",
"inspection",
"isotonic",
"kernel_approximation",
"kernel_ridge",
"linear_model",
"manifold",
"metrics",
"mixture",
"model_selection",
"multiclass",
"multioutput",
"naive_bayes",
"neighbors",
"neural_network",
"pipeline",
"preprocessing",
"random_projection",
"semi_supervised",
"svm",
"tree",
"discriminant_analysis",
"impute",
"compose",
]
__all__ = _submodules + [
# Non-modules:
"clone",
"get_config",
"set_config",
"config_context",
"show_versions",
]
def __dir__():
return __all__
def __getattr__(name):
if name in _submodules:
return _importlib.import_module(f"sklearn.{name}")
else:
try:
return globals()[name]
except KeyError:
raise AttributeError(f"Module 'sklearn' has no attribute '{name}'")
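# A minimal sketch of how this PEP 562 lazy-import hook behaves (assuming an
# installed scikit-learn; `sklearn.cluster` is only imported on first access):
#
#     import sklearn
#     km = sklearn.cluster.KMeans   # triggers import_module("sklearn.cluster")
#     sklearn.no_such_module        # raises AttributeError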
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import numpy as np
# Check if a random seed exists in the environment, if not create one.
_random_seed = os.environ.get("SKLEARN_SEED", None)
if _random_seed is None:
_random_seed = np.random.uniform() * np.iinfo(np.int32).max
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
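# Usage sketch (assumes a test runner that calls setup_module, e.g. pytest):
#     SKLEARN_SEED=42 pytest sklearn/tests/
# pins both numpy's and the stdlib random module's RNGs for a reproducible run.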
|
"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms in the tightly-knit world of scientific Python
# packages (numpy, scipy, matplotlib).
#
# It aims to provide simple and efficient solutions to learning problems
# that are accessible to everybody and reusable in various contexts:
# machine-learning as a versatile tool for science and engineering.
#
# See https://scikit-learn.org for complete documentation.
import importlib as _importlib
import logging
import os
import random
from ._config import config_context, get_config, set_config
logger = logging.getLogger(__name__)
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y.0 # For first release after an increment in Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.Y.ZaN # Alpha release
# X.Y.ZbN # Beta release
# X.Y.ZrcN # Release Candidate
# X.Y.Z # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "1.8.dev0"
# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
# simultaneously. This can happen for instance when calling BLAS inside a
# prange. Setting the following environment variable allows multiple OpenMP
# libraries to be loaded. It should not degrade performance since we manually
# take care of potential over-subscription performance issues, in sections of
# the code where nested OpenMP loops can happen, by dynamically reconfiguring
# the inner OpenMP runtime to temporarily disable it while under the scope of
# the outer OpenMP parallel section.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
# `_distributor_init` allows distributors to run custom init code.
# For instance, for the Windows wheel, this is used to pre-load the
# vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
# sub-folder.
# It is necessary to do this prior to importing show_versions as the
# latter is linked to the OpenMP runtime to make it possible to introspect
# it, and importing it first would fail if the OpenMP DLL cannot be found.
from . import ( # noqa: F401 E402
__check_build,
_distributor_init,
)
from .base import clone # noqa: E402
from .utils._show_versions import show_versions # noqa: E402
_submodules = [
"calibration",
"cluster",
"covariance",
"cross_decomposition",
"datasets",
"decomposition",
"dummy",
"ensemble",
"exceptions",
"experimental",
"externals",
"feature_extraction",
"feature_selection",
"frozen",
"gaussian_process",
"inspection",
"isotonic",
"kernel_approximation",
"kernel_ridge",
"linear_model",
"manifold",
"metrics",
"mixture",
"model_selection",
"multiclass",
"multioutput",
"naive_bayes",
"neighbors",
"neural_network",
"pipeline",
"preprocessing",
"random_projection",
"semi_supervised",
"svm",
"tree",
"discriminant_analysis",
"impute",
"compose",
]
__all__ = _submodules + [
# Non-modules:
"clone",
"get_config",
"set_config",
"config_context",
"show_versions",
]
def __dir__():
return __all__
def __getattr__(name):
if name in _submodules:
return _importlib.import_module(f"sklearn.{name}")
else:
try:
return globals()[name]
except KeyError:
raise AttributeError(f"Module 'sklearn' has no attribute '{name}'")
_BUILT_WITH_MESON = False
try:
import sklearn._built_with_meson # noqa: F401
_BUILT_WITH_MESON = True
except ModuleNotFoundError:
pass
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import numpy as np
# Check if a random seed exists in the environment, if not create one.
_random_seed = os.environ.get("SKLEARN_SEED", None)
if _random_seed is None:
_random_seed = np.random.uniform() * np.iinfo(np.int32).max
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
|
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_image import CLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=CLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...clip_image import CLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=CLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_steps.md']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
    NOTE: copied from mktestdocs.__main__, extended with keyword_ignore support.
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
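# Hypothetical usage sketch: run all python blocks of one markdown file as a
# single script, skipping blocks that mention "jina" (the path is illustrative):
#     check_md_file("docs/user_guide/intro.md", memory=True, keyword_ignore=["jina"])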
files_to_check = [
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
]
file_to_remove = []
for file in files_to_check:
for fn in file_to_skip:
if fn in str(file):
file_to_remove.append(file)
for file in file_to_remove:
files_to_check.remove(file)
@pytest.mark.parametrize('fpath', files_to_check, ids=str)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True, keyword_ignore=['pickle', 'jac'])
def test_readme():
check_md_file(
fpath='README.md',
memory=True,
keyword_ignore=[
'tensorflow',
'fastapi',
'push',
'langchain',
'MovieDoc',
'jina',
],
)
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_steps.md']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
    NOTE: copied from mktestdocs.__main__, extended with keyword_ignore support.
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
files_to_check = [
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
]
file_to_remove = []
for file in files_to_check:
for fn in file_to_skip:
if fn in str(file):
file_to_remove.append(file)
for file in file_to_remove:
files_to_check.remove(file)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
@pytest.mark.parametrize('fpath', files_to_check, ids=str)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True, keyword_ignore=['pickle', 'jac'])
def test_readme():
check_md_file(
fpath='README.md',
memory=True,
keyword_ignore=[
'tensorflow',
'fastapi',
'push',
'langchain',
'MovieDoc',
'jina',
],
)
|
from typing import Optional, Union
from langchain.agents import AgentOutputParser
from langchain_core.agents import AgentAction, AgentFinish
def extract_action_details(text: str) -> tuple[Optional[str], Optional[str]]:
# Split the text into lines and strip whitespace
lines = [line.strip() for line in text.strip().split("\n")]
# Initialize variables to hold the extracted values
action = None
action_input = None
# Iterate through the lines to find and extract the desired information
for line in lines:
if line.startswith("Action:"):
action = line.split(":", 1)[1].strip()
elif line.startswith("Action Input:"):
action_input = line.split(":", 1)[1].strip()
return action, action_input
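# Worked example of the expected parse:
#     extract_action_details("Thought: ...\nAction: search\nAction Input: weather")
#     -> ("search", "weather")
# Lines without either prefix are ignored; missing fields come back as None.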
class FakeOutputParser(AgentOutputParser):
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
action, input = extract_action_details(text)
if action:
            log = f"\nInvoking: `{action}` with `{input}`"
return AgentAction(tool=action, tool_input=(input or ""), log=log)
elif "Final Answer" in text:
return AgentFinish({"output": text}, text)
return AgentAction(
"Intermediate Answer", "after_colon", "Final Answer: This should end"
)
@property
def _type(self) -> str:
return "self_ask"
|
from typing import Optional, Tuple, Union
from langchain.agents import AgentOutputParser
from langchain_core.agents import AgentAction, AgentFinish
def extract_action_details(text: str) -> Tuple[Optional[str], Optional[str]]:
# Split the text into lines and strip whitespace
lines = [line.strip() for line in text.strip().split("\n")]
# Initialize variables to hold the extracted values
action = None
action_input = None
# Iterate through the lines to find and extract the desired information
for line in lines:
if line.startswith("Action:"):
action = line.split(":", 1)[1].strip()
elif line.startswith("Action Input:"):
action_input = line.split(":", 1)[1].strip()
return action, action_input
class FakeOutputParser(AgentOutputParser):
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
print("FakeOutputParser", text)
action, input = extract_action_details(text)
if action:
            log = f"\nInvoking: `{action}` with `{input}`"
return AgentAction(tool=action, tool_input=(input or ""), log=log)
elif "Final Answer" in text:
return AgentFinish({"output": text}, text)
return AgentAction(
"Intermediate Answer", "after_colon", "Final Answer: This should end"
)
@property
def _type(self) -> str:
return "self_ask"
|
import sys
from jina.parsers import set_gateway_parser
from jina.parsers.helper import _update_gateway_args
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
def run(*args, **kwargs):
runtime_args = set_gateway_parser().parse_args(args)
_update_gateway_args(runtime_args)
with AsyncNewLoopRuntime(runtime_args, req_handler_cls=GatewayRequestHandler) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
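# Usage sketch (assumed invocation; the flag names come from jina's gateway
# parser and may differ across versions):
#     python this_script.py --port 12345 --protocol GRPC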
|
import sys
from jina.parsers import set_gateway_parser
from jina.parsers.helper import _update_gateway_args
from jina.serve.runtimes.gateway import GatewayRuntime
def run(*args, **kwargs):
runtime_cls = GatewayRuntime
print(f' args {args}')
runtime_args = set_gateway_parser().parse_args(args)
print(f' protocol {runtime_args.protocol}')
_update_gateway_args(runtime_args)
print(f' runtime_cls {runtime_cls}')
with runtime_cls(runtime_args) as runtime:
        print("Let's run forever")
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Text2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer,
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
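            # MPS has historically lacked device-bound torch.Generator support,
            # hence the global torch.manual_seed fallback on this branch.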
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
|
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2Text2ImgPipelinePipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Text2ImgPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder.eval(),
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
|
import time
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 4. Choose a target precision for the corpus embeddings
corpus_precision = "ubinary"
# Valid options are: "float32", "uint8", "int8", "ubinary", and "binary"
# But FAISS only supports "float32", "uint8", and "ubinary"
# 5. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 6. We don't have a FAISS index yet; the first call to semantic_search_faiss below creates it
corpus_index = None
while True:
# 7. Encode the queries using the full precision
start_time = time.time()
query_embeddings = model.encode(queries, normalize_embeddings=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 8. Perform semantic search using FAISS
results, search_time, corpus_index = semantic_search_faiss(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=4,
exact=True,
output_index=True,
)
# This is a helper function to showcase how FAISS can be used with quantized embeddings.
# You must either provide the `corpus_embeddings` or the `corpus_index` FAISS index, but not both.
# In the first call we'll provide the `corpus_embeddings` and get the `corpus_index` back, which
# we'll use in the next call. In practice, the index is stored in RAM or saved to disk, and not
# recalculated for every query.
# This function will 1) quantize the query embeddings to the same precision as the corpus embeddings,
# 2) perform the semantic search using FAISS, 3) rescore the results using the full precision embeddings,
# and 4) return the results and the search time (and perhaps the FAISS index).
# `corpus_precision` must be the same as the precision used to quantize the corpus embeddings.
# It is used to convert the query embeddings to the same precision as the corpus embeddings.
# `top_k` determines how many results are returned for each query.
# `rescore_multiplier` is a parameter for the rescoring step. Rather than searching for the top_k results,
# we search for top_k * rescore_multiplier results and rescore the top_k results using the full precision embeddings.
# So, higher values of rescore_multiplier will give better results, but will be slower.
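# For example, with top_k=10 and rescore_multiplier=4 the FAISS index returns 40
# candidates, which are rescored against the float32 embeddings and cut back to 10.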
# `calibration_embeddings` is a set of embeddings used to calibrate the quantization of the query embeddings.
# This is important only if you are using uint8 or int8 precision. In practice, this is used to calculate
# the minimum and maximum values of each of the embedding dimensions, which are then used to determine the
# quantization thresholds.
# `rescore` determines whether to rescore the results using the full precision embeddings, if False & the
# corpus is quantized, the results will be very poor. `exact` determines whether to use the exact search
# or the approximate search method in FAISS.
# 9. Output the results
print("Precision:", corpus_precision)
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import time
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 4. Choose a target precision for the corpus embeddings
corpus_precision = "ubinary"
# Valid options are: "float32", "uint8", "int8", "ubinary", and "binary"
# But FAISS only supports "float32", "uint8", and "ubinary"
# 5. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 6. We don't have a FAISS index yet; the first call to semantic_search_faiss below creates it
corpus_index = None
while True:
# 7. Encode the queries using the full precision
start_time = time.time()
query_embeddings = model.encode(queries, normalize_embeddings=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 8. Perform semantic search using FAISS
results, search_time, corpus_index = semantic_search_faiss(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=4,
exact=True,
output_index=True,
)
# This is a helper function to showcase how FAISS can be used with quantized embeddings.
# You must either provide the `corpus_embeddings` or the `corpus_index` FAISS index, but not both.
# In the first call we'll provide the `corpus_embeddings` and get the `corpus_index` back, which
# we'll use in the next call. In practice, the index is stored in RAM or saved to disk, and not
# recalculated for every query.
# This function will 1) quantize the query embeddings to the same precision as the corpus embeddings,
# 2) perform the semantic search using FAISS, 3) rescore the results using the full precision embeddings,
# and 4) return the results and the search time (and perhaps the FAISS index).
# `corpus_precision` must be the same as the precision used to quantize the corpus embeddings.
# It is used to convert the query embeddings to the same precision as the corpus embeddings.
# `top_k` determines how many results are returned for each query.
# `rescore_multiplier` is a parameter for the rescoring step. Rather than searching for the top_k results,
# we search for top_k * rescore_multiplier results and rescore the top_k results using the full precision embeddings.
# So, higher values of rescore_multiplier will give better results, but will be slower.
# `calibration_embeddings` is a set of embeddings used to calibrate the quantization of the query embeddings.
# This is important only if you are using uint8 or int8 precision. In practice, this is used to calculate
# the minimum and maximum values of each of the embedding dimensions, which are then used to determine the
# quantization thresholds.
# `rescore` determines whether to rescore the results using the full precision embeddings, if False & the
# corpus is quantized, the results will be very poor. `exact` determines whether to use the exact search
# or the approximate search method in FAISS.
# 9. Output the results
print("Precision:", corpus_precision)
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = log(1 + sum(exp(s(k,l) - s(i,j))))``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
        batch such that the expected similarity of ``(i,j)`` is greater than that of ``(k,l)``. The summation is over
        all pairs of input pairs in the batch that satisfy this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
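    # Sketch of the pairwise trick above: with similarities s = [s0, s1, s2] and
    # labels y = [1.0, 0.3, 0.8], `scores[:, None] - scores[None, :]` is the 3x3
    # matrix D with D[i, j] = s_i - s_j, and `labels[:, None] < labels[None, :]`
    # keeps exactly the entries where pair j is expected to be more similar than
    # pair i; everything else is pushed down by -1e12 so exp() makes it vanish.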
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim):
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = log(1 + sum(exp(s(k,l) - s(i,j))))``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
        batch such that the expected similarity of ``(i,j)`` is greater than that of ``(k,l)``. The summation is over
        all pairs of input pairs in the batch that satisfy this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
:param model: SentenceTransformerModel
:param similarity_fct: Function to compute the PAIRWISE similarity between embeddings. Default is ``util.pairwise_cos_sim``.
:param scale: Output of similarity function is multiplied by scale value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
            from sentence_transformers import SentenceTransformer, losses
            from sentence_transformers.readers import InputExample
            from torch.utils.data import DataLoader

            model = SentenceTransformer('bert-base-uncased')
            train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=1.0),
                InputExample(texts=['My third sentence', 'Unrelated sentence'], label=0.3)]
            train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=32)
            train_loss = losses.CoSENTLoss(model=model)
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
"""**Load** module helps with serialization and deserialization."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable import Serializable
# Unfortunately, we have to eagerly import load from langchain_core/load/load.py
# to avoid a namespace conflict. We want users to still be able to use
# `from langchain_core.load import load` to get the load function, but
# the `from langchain_core.load.load import load` absolute import should also work.
from langchain_core.load.load import load
__all__ = ["dumpd", "dumps", "load", "loads", "Serializable"]
_dynamic_imports = {
"dumpd": "dump",
"dumps": "dump",
"loads": "load",
"Serializable": "serializable",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
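# Illustrative sketch of the lookup above (comments only):
# `from langchain_core.load import dumpd` hits __getattr__("dumpd"), which maps
# via _dynamic_imports to the sibling module "dump", imports
# langchain_core.load.dump, and caches the symbol in globals() so later lookups
# skip this hook entirely.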
def __dir__() -> list[str]:
return list(__all__)
|
"""**Load** module helps with serialization and deserialization."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable import Serializable
# Unfortunately, we have to eagerly import load from langchain_core/load/load.py
# to avoid a namespace conflict. We want users to still be able to use
# `from langchain_core.load import load` to get the load function, but
# the `from langchain_core.load.load import load` absolute import should also work.
from langchain_core.load.load import load
__all__ = ["dumpd", "dumps", "load", "loads", "Serializable"]
_dynamic_imports = {
"dumpd": "dump",
"dumps": "dump",
"load": "load",
"loads": "load",
"Serializable": "serializable",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_vision_available():
if is_torchvision_available():
from transformers import InternVLVideoProcessor
class InternVLVideoProcessingTester:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
size = size if size is not None else {"height": 384, "width": 384}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_video_shape(self, videos):
return [self.num_frames, self.num_channels, self.size["height"], self.size["width"]]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
class InternVLVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = InternVLVideoProcessor if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.video_processor_tester = InternVLVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_from_dict_with_kwargs(self):
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
self.assertEqual(video_processor.size, {"height": 384, "width": 384})
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42)
self.assertEqual(video_processor.size, {"height": 42, "width": 42})
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
pass
if is_vision_available():
if is_torchvision_available():
from transformers import InternVLVideoProcessor
class InternVLVideoProcessingTester:
def __init__(
self,
parent,
batch_size=5,
num_frames=8,
num_channels=3,
min_resolution=30,
max_resolution=80,
do_resize=True,
size=None,
do_normalize=True,
image_mean=OPENAI_CLIP_MEAN,
image_std=OPENAI_CLIP_STD,
do_convert_rgb=True,
):
size = size if size is not None else {"height": 384, "width": 384}
self.parent = parent
self.batch_size = batch_size
self.num_frames = num_frames
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_video_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def expected_output_video_shape(self, videos):
return [self.num_frames, self.num_channels, self.size["height"], self.size["width"]]
def prepare_video_inputs(self, equal_resolution=False, return_tensors="pil"):
videos = prepare_video_inputs(
batch_size=self.batch_size,
num_frames=self.num_frames,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
return_tensors=return_tensors,
)
return videos
@require_torch
@require_vision
class InternVLVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase):
fast_video_processing_class = InternVLVideoProcessor if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.video_processor_tester = InternVLVideoProcessingTester(self)
@property
def video_processor_dict(self):
return self.video_processor_tester.prepare_video_processor_dict()
def test_video_processor_from_dict_with_kwargs(self):
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict)
self.assertEqual(video_processor.size, {"height": 384, "width": 384})
video_processor = self.fast_video_processing_class.from_dict(self.video_processor_dict, size=42)
self.assertEqual(video_processor.size, {"height": 42, "width": 42})
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
            outputs (dict, optional): Outputs from model.
                In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.schedulers:
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.schedulers:
if scheduler.by_epoch:
scheduler.step()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(
self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
            outputs (Sequence[BaseDataSample], optional): Outputs from model.
                In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.schedulers:
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.schedulers:
if scheduler.by_epoch:
scheduler.step()
|
"""Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [[1.0] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> list[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [1.0] * 9 + [0.0]
async def aembed_query(self, text: str) -> list[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text)),
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> list[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> list[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
        Any other input text becomes the singular result [0, 0]!
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
            # Assume it's just a test string; no attention is paid to the value.
return [0.0, 0.0]
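# Worked example: embed_query("0.5") -> [cos(pi/2), sin(pi/2)] = [0.0, 1.0] (up
# to floating-point error), while embed_query("not a number") -> [0.0, 0.0].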
|
"""Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [[1.0] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> list[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [1.0] * 9 + [0.0]
async def aembed_query(self, text: str) -> list[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> list[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> list[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
        Any other input text becomes the singular result [0, 0]!
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
            # Assume it's just a test string; no attention is paid to the value.
return [0.0, 0.0]
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
@pytest.mark.parametrize('uses', ['jinaai://jina-ai/DummyExecutor'])
def test_get_image_name(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_fetch(
name,
*args,
**kwargs,
):
mock(
name=name,
rebuild_image=kwargs.get(
'rebuild_image', args[2] if len(args) >= 3 else True
),
)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
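    # Note on the stub above: it records rebuild_image whether the caller passed
    # it positionally (third positional argument after name) or as a keyword,
    # defaulting to True, which is assumed to mirror HubIO.fetch_meta's default.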
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
image_name = get_image_name(uses)
assert image_name in {'jinahub/DummyExecutor', 'jinahub/jina-ai/DummyExecutor'}
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
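# The contract the assertions above encode, sketched for clarity (this is NOT
# jina's implementation): fetch_meta must receive rebuild_image=False exactly
# when JINA_HUB_NO_IMAGE_REBUILD is present in the environment, i.e.
#
#   rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ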
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
@pytest.mark.parametrize('uses', ['jinaai://jina-ai/DummyExecutor'])
def test_get_image_name(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
prefer_platform=None,
secret=None,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
image_name = get_image_name(uses)
assert image_name in {'jinahub/DummyExecutor', 'jinahub/jina-ai/DummyExecutor'}
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestConditionalDETR(TestCase):
def setUp(self) -> None:
register_all_modules()
def test_conditional_detr_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
img_metas = DetDataSample()
img_metas.set_metainfo(metainfo)
batch_data_samples = []
batch_data_samples.append(img_metas)
config = get_detector_cfg(
'conditional_detr/conditional-detr_r50_8xb2-50e_coco.py')
model = build_detector(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
img_metas.gt_instances = gt_instances
batch_data_samples1 = []
batch_data_samples1.append(img_metas)
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
self.assertGreater(loss.item(), 0,
'cls loss should be non-zero')
elif 'bbox' in key:
self.assertEqual(
loss.item(), 0,
                    'there should be no box loss when there are no ground truth boxes')
elif 'iou' in key:
self.assertEqual(
loss.item(), 0,
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
img_metas.gt_instances = gt_instances
batch_data_samples2 = []
batch_data_samples2.append(img_metas)
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(random_image, batch_data_samples=batch_data_samples2)
# test only predict
model.predict(
random_image, batch_data_samples=batch_data_samples2, rescale=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestConditionalDETR(TestCase):
def setUp(self) -> None:
register_all_modules()
def test_conditional_detr_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
img_metas = DetDataSample()
img_metas.set_metainfo(metainfo)
batch_data_samples = []
batch_data_samples.append(img_metas)
config = get_detector_cfg(
'conditional_detr/conditional_detr_r50_8xb2-50e_coco.py')
model = build_detector(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
img_metas.gt_instances = gt_instances
batch_data_samples1 = []
batch_data_samples1.append(img_metas)
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
self.assertGreater(loss.item(), 0,
'cls loss should be non-zero')
elif 'bbox' in key:
self.assertEqual(
loss.item(), 0,
                    'there should be no box loss when there are no ground truth boxes')
elif 'iou' in key:
self.assertEqual(
loss.item(), 0,
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
img_metas.gt_instances = gt_instances
batch_data_samples2 = []
batch_data_samples2.append(img_metas)
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(random_image, batch_data_samples=batch_data_samples2)
# test only predict
model.predict(
random_image, batch_data_samples=batch_data_samples2, rescale=True)
|
"""Different ways to combine documents."""
from langchain.chains.combine_documents.reduce import (
acollapse_docs,
collapse_docs,
split_list_of_docs,
)
from langchain.chains.combine_documents.stuff import create_stuff_documents_chain
__all__ = [
"acollapse_docs",
"collapse_docs",
"create_stuff_documents_chain",
"split_list_of_docs",
]
|
"""Different ways to combine documents."""
from langchain.chains.combine_documents.reduce import (
acollapse_docs,
collapse_docs,
split_list_of_docs,
)
from langchain.chains.combine_documents.stuff import create_stuff_documents_chain
__all__ = [
"acollapse_docs",
"collapse_docs",
"split_list_of_docs",
"create_stuff_documents_chain",
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize as deserialize
from keras.src.activations import get as get
from keras.src.activations import serialize as serialize
from keras.src.activations.activations import celu as celu
from keras.src.activations.activations import elu as elu
from keras.src.activations.activations import exponential as exponential
from keras.src.activations.activations import gelu as gelu
from keras.src.activations.activations import glu as glu
from keras.src.activations.activations import hard_shrink as hard_shrink
from keras.src.activations.activations import hard_sigmoid as hard_sigmoid
from keras.src.activations.activations import hard_silu as hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh as hard_tanh
from keras.src.activations.activations import leaky_relu as leaky_relu
from keras.src.activations.activations import linear as linear
from keras.src.activations.activations import log_sigmoid as log_sigmoid
from keras.src.activations.activations import log_softmax as log_softmax
from keras.src.activations.activations import mish as mish
from keras.src.activations.activations import relu as relu
from keras.src.activations.activations import relu6 as relu6
from keras.src.activations.activations import selu as selu
from keras.src.activations.activations import sigmoid as sigmoid
from keras.src.activations.activations import silu as silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink as soft_shrink
from keras.src.activations.activations import softmax as softmax
from keras.src.activations.activations import softplus as softplus
from keras.src.activations.activations import softsign as softsign
from keras.src.activations.activations import sparse_plus as sparse_plus
from keras.src.activations.activations import sparse_sigmoid as sparse_sigmoid
from keras.src.activations.activations import sparsemax as sparsemax
from keras.src.activations.activations import squareplus as squareplus
from keras.src.activations.activations import tanh as tanh
from keras.src.activations.activations import tanh_shrink as tanh_shrink
from keras.src.activations.activations import threshold as threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparse_sigmoid
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
|
import logging
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import Runnable
logger = logging.getLogger(__name__)
# Default template
DEFAULT_TEMPLATE = """You are an assistant tasked with taking a natural language \
query from a user and converting it into a query for a vectorstore. \
In this process, you strip out information that is not relevant for \
the retrieval task. Here is the user query: {question}"""
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate.from_template(DEFAULT_TEMPLATE)
class RePhraseQueryRetriever(BaseRetriever):
"""Given a query, use an LLM to re-phrase it.
Then, retrieve docs for the re-phrased query."""
retriever: BaseRetriever
llm_chain: Runnable
@classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLLM,
prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT,
) -> "RePhraseQueryRetriever":
"""Initialize from llm using default template.
The prompt used here expects a single input: `question`
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: prompt template for query generation
Returns:
RePhraseQueryRetriever
"""
llm_chain = prompt | llm | StrOutputParser()
return cls(
retriever=retriever,
llm_chain=llm_chain,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""Get relevant documents given a user question.
Args:
query: user question
Returns:
Relevant documents for re-phrased question
"""
re_phrased_question = self.llm_chain.invoke(
query, {"callbacks": run_manager.get_child()}
)
logger.info(f"Re-phrased question: {re_phrased_question}")
docs = self.retriever.invoke(
re_phrased_question, config={"callbacks": run_manager.get_child()}
)
return docs
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
raise NotImplementedError
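# Minimal usage sketch (assumes an existing `my_retriever: BaseRetriever`).
# FakeListLLM is langchain_core's canned-response test LLM, used here only to
# keep the sketch self-contained; any BaseLLM works:
#
#   from langchain_core.language_models import FakeListLLM
#
#   llm = FakeListLLM(responses=["capital of France"])
#   retriever = RePhraseQueryRetriever.from_llm(retriever=my_retriever, llm=llm)
#   docs = retriever.invoke("hey, umm, what's the capital of France again?")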
|
import logging
from typing import List
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import Runnable
logger = logging.getLogger(__name__)
# Default template
DEFAULT_TEMPLATE = """You are an assistant tasked with taking a natural language \
query from a user and converting it into a query for a vectorstore. \
In this process, you strip out information that is not relevant for \
the retrieval task. Here is the user query: {question}"""
# Default prompt
DEFAULT_QUERY_PROMPT = PromptTemplate.from_template(DEFAULT_TEMPLATE)
class RePhraseQueryRetriever(BaseRetriever):
"""Given a query, use an LLM to re-phrase it.
Then, retrieve docs for the re-phrased query."""
retriever: BaseRetriever
llm_chain: Runnable
@classmethod
def from_llm(
cls,
retriever: BaseRetriever,
llm: BaseLLM,
prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT,
) -> "RePhraseQueryRetriever":
"""Initialize from llm using default template.
The prompt used here expects a single input: `question`
Args:
retriever: retriever to query documents from
llm: llm for query generation using DEFAULT_QUERY_PROMPT
prompt: prompt template for query generation
Returns:
RePhraseQueryRetriever
"""
llm_chain = prompt | llm | StrOutputParser()
return cls(
retriever=retriever,
llm_chain=llm_chain,
)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Get relevant documents given a user question.
Args:
query: user question
Returns:
Relevant documents for re-phrased question
"""
re_phrased_question = self.llm_chain.invoke(
query, {"callbacks": run_manager.get_child()}
)
logger.info(f"Re-phrased question: {re_phrased_question}")
docs = self.retriever.invoke(
re_phrased_question, config={"callbacks": run_manager.get_child()}
)
return docs
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
raise NotImplementedError
|
"""**Load** module helps with serialization and deserialization."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable import Serializable
# Unfortunately, we have to import load from langchain_core/load/load.py
# eagerly to avoid a namespace conflict. We want users to still be able to use
# `from langchain_core.load import load` to get the load function, but
# the `from langchain_core.load.load import load` absolute import should also work.
from langchain_core.load.load import load
__all__ = ("Serializable", "dumpd", "dumps", "load", "loads")
_dynamic_imports = {
"dumpd": "dump",
"dumps": "dump",
"loads": "load",
"Serializable": "serializable",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
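# Access pattern enabled by the machinery above (illustrative):
#
#   from langchain_core import load
#   load.dumps  # not imported eagerly; __getattr__ resolves it from
#               # langchain_core.load.dump on first access and caches it in
#               # globals(), so later lookups bypass __getattr__ entirely.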
|
"""**Load** module helps with serialization and deserialization."""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable import Serializable
# Unfortunately, we have to import load from langchain_core/load/load.py
# eagerly to avoid a namespace conflict. We want users to still be able to use
# `from langchain_core.load import load` to get the load function, but
# the `from langchain_core.load.load import load` absolute import should also work.
from langchain_core.load.load import load
__all__ = ("dumpd", "dumps", "load", "loads", "Serializable")
_dynamic_imports = {
"dumpd": "dump",
"dumps": "dump",
"loads": "load",
"Serializable": "serializable",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
# pyre-strict
# mypy: allow-untyped-defs
import abc
import os
from concurrent.futures import Future
from typing import Optional, Union
import torch.distributed as dist
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
from torch.distributed.checkpoint.planner import SavePlanner
from torch.distributed.checkpoint.storage import StorageWriter
class _AsyncCheckpointExecutor(abc.ABC):
@abc.abstractmethod
def execute_save(
self,
staged_state_dict_future: Union[STATE_DICT_TYPE, Future[STATE_DICT_TYPE]],
*,
checkpoint_id: Union[str, os.PathLike, None] = None,
storage_writer: Optional[StorageWriter] = None,
planner: Optional[SavePlanner] = None,
process_group: Optional[dist.ProcessGroup] = None,
) -> Future:
"""
Execute the checkpoint save request asynchronously.
This method is intended to be used as an abstraction for
implementing async checkpointing. The actual checkpoint save
operation is executed in a separate thread or process depending
on the implementation of this interface.
"""
|
# pyre-strict
# mypy: allow-untyped-defs
import abc
import os
from concurrent.futures import Future
from typing import Optional, Union
import torch.distributed as dist
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
from torch.distributed.checkpoint.planner import SavePlanner
from torch.distributed.checkpoint.storage import StorageWriter
class _AsyncCheckpointExecutor(abc.ABC):
@abc.abstractmethod
def execute_save(
self,
staged_state_dict: STATE_DICT_TYPE,
*,
checkpoint_id: Union[str, os.PathLike, None] = None,
storage_writer: Optional[StorageWriter] = None,
planner: Optional[SavePlanner] = None,
process_group: Optional[dist.ProcessGroup] = None,
) -> Future:
"""
Execute the checkpoint save request asynchronously.
This method is intended to be used as an abstraction for
implementing async checkpointing. The actual checkpoint save
operation is executed in a separate thread or process depending
on the implementation of this interface.
"""
|
#!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_MUSTC, MODEL_TYPE_TEDLIUM3
from mustc.dataset import MUSTC
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
logger = logging.getLogger(__name__)
@dataclass
class Config:
dataset: Callable
bundle: RNNTBundle
_CONFIGS = {
MODEL_TYPE_LIBRISPEECH: Config(
partial(torchaudio.datasets.LIBRISPEECH, url="test-clean"),
EMFORMER_RNNT_BASE_LIBRISPEECH,
),
MODEL_TYPE_MUSTC: Config(
partial(MUSTC, subset="tst-COMMON"),
EMFORMER_RNNT_BASE_MUSTC,
),
MODEL_TYPE_TEDLIUM3: Config(
partial(torchaudio.datasets.TEDLIUM, release="release3", subset="test"),
EMFORMER_RNNT_BASE_TEDLIUM3,
),
}
def run_eval_streaming(args):
dataset = _CONFIGS[args.model_type].dataset(args.dataset_path)
bundle = _CONFIGS[args.model_type].bundle
decoder = bundle.get_decoder()
token_processor = bundle.get_token_processor()
feature_extractor = bundle.get_feature_extractor()
streaming_feature_extractor = bundle.get_streaming_feature_extractor()
hop_length = bundle.hop_length
num_samples_segment = bundle.segment_length * hop_length
num_samples_segment_right_context = num_samples_segment + bundle.right_context_length * hop_length
for idx in range(10):
sample = dataset[idx]
waveform = sample[0].squeeze()
# Streaming decode.
state, hypothesis = None, None
for idx in range(0, len(waveform), num_samples_segment):
segment = waveform[idx : idx + num_samples_segment_right_context]
segment = torch.nn.functional.pad(segment, (0, num_samples_segment_right_context - len(segment)))
with torch.no_grad():
features, length = streaming_feature_extractor(segment)
hypos, state = decoder.infer(features, length, 10, state=state, hypothesis=hypothesis)
hypothesis = hypos[0]
transcript = token_processor(hypothesis[0], lstrip=False)
print(transcript, end="", flush=True)
print()
# Non-streaming decode.
with torch.no_grad():
features, length = feature_extractor(waveform)
hypos = decoder(features, length, 10)
print(token_processor(hypos[0][0]))
print()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--model-type", type=str, choices=_CONFIGS.keys(), required=True)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to dataset.",
required=True,
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
run_eval_streaming(args)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_MUSTC, MODEL_TYPE_TEDLIUM3
from mustc.dataset import MUSTC
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
from torchaudio.prototype.pipelines import (
EMFORMER_RNNT_BASE_MUSTC,
EMFORMER_RNNT_BASE_TEDLIUM3,
)
logger = logging.getLogger(__name__)
@dataclass
class Config:
dataset: Callable
bundle: RNNTBundle
_CONFIGS = {
MODEL_TYPE_LIBRISPEECH: Config(
partial(torchaudio.datasets.LIBRISPEECH, url="test-clean"),
EMFORMER_RNNT_BASE_LIBRISPEECH,
),
MODEL_TYPE_MUSTC: Config(
partial(MUSTC, subset="tst-COMMON"),
EMFORMER_RNNT_BASE_MUSTC,
),
MODEL_TYPE_TEDLIUM3: Config(
partial(torchaudio.datasets.TEDLIUM, release="release3", subset="test"),
EMFORMER_RNNT_BASE_TEDLIUM3,
),
}
def run_eval_streaming(args):
dataset = _CONFIGS[args.model_type].dataset(args.dataset_path)
bundle = _CONFIGS[args.model_type].bundle
decoder = bundle.get_decoder()
token_processor = bundle.get_token_processor()
feature_extractor = bundle.get_feature_extractor()
streaming_feature_extractor = bundle.get_streaming_feature_extractor()
hop_length = bundle.hop_length
num_samples_segment = bundle.segment_length * hop_length
num_samples_segment_right_context = num_samples_segment + bundle.right_context_length * hop_length
for idx in range(10):
sample = dataset[idx]
waveform = sample[0].squeeze()
# Streaming decode.
state, hypothesis = None, None
for idx in range(0, len(waveform), num_samples_segment):
segment = waveform[idx : idx + num_samples_segment_right_context]
segment = torch.nn.functional.pad(segment, (0, num_samples_segment_right_context - len(segment)))
with torch.no_grad():
features, length = streaming_feature_extractor(segment)
hypos, state = decoder.infer(features, length, 10, state=state, hypothesis=hypothesis)
hypothesis = hypos[0]
transcript = token_processor(hypothesis[0], lstrip=False)
print(transcript, end="", flush=True)
print()
# Non-streaming decode.
with torch.no_grad():
features, length = feature_extractor(waveform)
hypos = decoder(features, length, 10)
print(token_processor(hypos[0][0]))
print()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--model-type", type=str, choices=_CONFIGS.keys(), required=True)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to dataset.",
required=True,
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
run_eval_streaming(args)
if __name__ == "__main__":
cli_main()
|
_base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
|
_base_ = 'ssd300_coco.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
# Use ClassAwareSampler
train_dataloader = dict(
sampler=dict(_delete_=True, type='ClassAwareSampler', num_sample_class=1))
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
# Use ClassAwareSampler
data = dict(
train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
_proto_type_name: Optional[str] = None
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
def _docarray_to_json_compatible(self):
"""
Convert itself into a json compatible object
        :return: a representation of this object compatible with orjson
"""
return self
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
_proto_type_name: str
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', channel_order='bgr', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from abc import abstractmethod
from typing import Iterable, Type
from docarray.document import BaseDocument
class AbstractDocumentArray(Iterable):
document_type: Type[BaseDocument]
@abstractmethod
def __init__(self, docs: Iterable[BaseDocument]):
...
@abstractmethod
def __class_getitem__(
cls, item: Type[BaseDocument]
) -> Type['AbstractDocumentArray']:
...
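# Parametrization sketch: __class_getitem__ is what lets a concrete array
# implementation be specialized by document class (names are assumptions):
#
#   class MyDoc(BaseDocument):
#       text: str
#
#   da = DocumentArray[MyDoc]([MyDoc(text='hello')])  # document_type is MyDoc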
|
from abc import abstractmethod
from typing import Iterable, Type
from docarray.document import BaseDocument
from docarray.document.abstract_document import AbstractDocument
class AbstractDocumentArray(Iterable):
document_type: Type[BaseDocument]
@abstractmethod
def __init__(self, docs: Iterable[AbstractDocument]):
...
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model, ".")
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Sparsity Stats Query : Row Non-Zero Mean: 105.4530029296875, Row Sparsity Mean: 0.9965449571609497
Model Sparsity Stats Corpus : Row Non-Zero Mean: 69.18349838256836, Row Sparsity Mean: 0.9977333247661591
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTripletEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
import torch
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseDistillKLDivLoss,
SparseEncoder,
SparseEncoderTrainer,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very cold.", "She walked to the store."],
"passage3": ["Its rainy", "She took the bus"],
}
)
def compute_labels(batch):
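    # Teacher scores for every (query, passage_i) pair, stacked below into
    # shape (batch_size, 3): the distillation loss expects one row of
    # candidate scores per query, with the positive passage's score first.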
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
emb_passages3 = teacher_model.encode(batch["passage3"])
return {
"label": torch.stack(
[
teacher_model.similarity_pairwise(emb_queries, emb_passages1),
teacher_model.similarity_pairwise(emb_queries, emb_passages2),
teacher_model.similarity_pairwise(emb_queries, emb_passages3),
],
dim=1,
)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SparseDistillKLDivLoss(student_model, similarity_fct=student_model.similarity_pairwise)
trainer = SparseEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
|
import torch
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseDistillKLDivLoss,
SparseEncoder,
SparseEncoderTrainer,
SpladePooling,
)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": torch.Tensor(
[
teacher_model.similarity_pairwise(emb_queries, emb_passages1),
teacher_model.similarity_pairwise(emb_queries, emb_passages2),
]
)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SparseDistillKLDivLoss(student_model, similarity_fct=student_model.similarity_pairwise)
trainer = SparseEncoderTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
|
import time
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 4. Choose a target precision for the corpus embeddings
corpus_precision = "ubinary"
# Valid options are: "float32", "uint8", "int8", "ubinary", and "binary"
# But FAISS only supports "float32", "uint8", and "ubinary"
# 5. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
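# What that calibration amounts to, conceptually (illustrative pseudo-code,
# not the library's internals): per-dimension min/max over the calibration
# set define the buckets that uint8 quantization maps values into, e.g.
#
#   lo, hi = calibration_embeddings.min(0), calibration_embeddings.max(0)
#   steps = (hi - lo) / 255
#   quantized = ((embeddings - lo) / steps).clip(0, 255).astype(np.uint8)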
# 6. Initially, we don't have a FAISS index yet, so we can use semantic_search_faiss to create it
corpus_index = None
while True:
# 7. Encode the queries using the full precision
start_time = time.time()
query_embeddings = model.encode(queries, normalize_embeddings=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 8. Perform semantic search using FAISS
results, search_time, corpus_index = semantic_search_faiss(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=4,
exact=True,
output_index=True,
)
# This is a helper function to showcase how FAISS can be used with quantized embeddings.
# You must either provide the `corpus_embeddings` or the `corpus_index` FAISS index, but not both.
# In the first call we'll provide the `corpus_embeddings` and get the `corpus_index` back, which
# we'll use in the next call. In practice, the index is stored in RAM or saved to disk, and not
# recalculated for every query.
# This function will 1) quantize the query embeddings to the same precision as the corpus embeddings,
# 2) perform the semantic search using FAISS, 3) rescore the results using the full precision embeddings,
# and 4) return the results and the search time (and perhaps the FAISS index).
# `corpus_precision` must be the same as the precision used to quantize the corpus embeddings.
# It is used to convert the query embeddings to the same precision as the corpus embeddings.
# `top_k` determines how many results are returned for each query.
# `rescore_multiplier` is a parameter for the rescoring step. Rather than searching for the top_k results,
# we search for top_k * rescore_multiplier results and rescore the top_k results using the full precision embeddings.
# So, higher values of rescore_multiplier will give better results, but will be slower.
# `calibration_embeddings` is a set of embeddings used to calibrate the quantization of the query embeddings.
# This is important only if you are using uint8 or int8 precision. In practice, this is used to calculate
# the minimum and maximum values of each of the embedding dimensions, which are then used to determine the
# quantization thresholds.
# `rescore` determines whether to rescore the results using the full precision embeddings, if False & the
# corpus is quantized, the results will be very poor. `exact` determines whether to use the exact search
# or the approximate search method in FAISS.
# 9. Output the results
print("Precision:", corpus_precision)
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import time
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 4. Choose a target precision for the corpus embeddings
corpus_precision = "ubinary"
# Valid options are: "float32", "uint8", "int8", "ubinary", and "binary"
# But FAISS only supports "float32", "uint8", and "ubinary"
# 5. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 6. Initially, we don't have a FAISS index yet, so we can use semantic_search_faiss to create it
corpus_index = None
while True:
# 7. Encode the queries using the full precision
start_time = time.time()
query_embeddings = model.encode(queries, normalize_embeddings=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 8. Perform semantic search using FAISS
results, search_time, corpus_index = semantic_search_faiss(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=4,
exact=True,
output_index=True,
)
# This is a helper function to showcase how FAISS can be used with quantized embeddings.
# You must either provide the `corpus_embeddings` or the `corpus_index` FAISS index, but not both.
# In the first call we'll provide the `corpus_embeddings` and get the `corpus_index` back, which
# we'll use in the next call. In practice, the index is stored in RAM or saved to disk, and not
# recalculated for every query.
# This function will 1) quantize the query embeddings to the same precision as the corpus embeddings,
# 2) perform the semantic search using FAISS, 3) rescore the results using the full precision embeddings,
# and 4) return the results and the search time (and perhaps the FAISS index).
# `corpus_precision` must be the same as the precision used to quantize the corpus embeddings.
# It is used to convert the query embeddings to the same precision as the corpus embeddings.
# `top_k` determines how many results are returned for each query.
# `rescore_multiplier` is a parameter for the rescoring step. Rather than searching for the top_k results,
# we search for top_k * rescore_multiplier results and rescore the top_k results using the full precision embeddings.
# So, higher values of rescore_multiplier will give better results, but will be slower.
# `calibration_embeddings` is a set of embeddings used to calibrate the quantization of the query embeddings.
# This is important only if you are using uint8 or int8 precision. In practice, this is used to calculate
# the minimum and maximum values of each of the embedding dimensions, which are then used to determine the
# quantization thresholds.
# `rescore` determines whether to rescore the results using the full precision embeddings, if False & the
# corpus is quantized, the results will be very poor. `exact` determines whether to use the exact search
# or the approximate search method in FAISS.
# 9. Output the results
print("Precision:", corpus_precision)
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of resetting the current ``Registry`` by ``default_scope``
in the internal module which cannot access runner directly, it is difficult
to get the ``default_scope`` defined in ``Runner``. However, if ``Runner``
created ``DefaultScope`` instance by given ``default_scope``, the internal
module can get ``default_scope`` by ``DefaultScope.get_current_instance``
everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: The current scope name.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
Since ``default_scope`` is an optional argument for ``Registry.build``,
``get_current_instance`` should return ``None`` if no ``DefaultScope``
has been created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # No `DefaultScope` has been created yet, so
>>> # `get_current_instance` returns `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
Optional[DefaultScope]: None if no ``DefaultScope`` instance has been
created yet; otherwise the latest created ``DefaultScope`` instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super(DefaultScope, cls).get_current_instance()
else:
instance = None
_release_lock()
return instance
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
from mmengine.utils.manager import ManagerMixin, _accquire_lock, _release_lock
class DefaultScope(ManagerMixin):
"""Scope of current task used to reset the current registry, which can be
accessed globally.
Consider the case of resetting the current ``Registry`` by ``default_scope``
in the internal module which cannot access runner directly, it is difficult
to get the ``default_scope`` defined in ``Runner``. However, if ``Runner``
created ``DefaultScope`` instance by given ``default_scope``, the internal
module can get ``default_scope`` by ``DefaultScope.get_current_instance``
everywhere.
Args:
name (str): Name of default scope for global access.
scope_name (str): Scope of current task.
Examples:
>>> from mmengine import MODELS
>>> # Define default scope in runner.
>>> DefaultScope.get_instance('task', scope_name='mmdet')
>>> # Get default scope globally.
>>> scope_name = DefaultScope.get_instance('task').scope_name
>>> # build model from cfg.
>>> model = MODELS.build(model_cfg, default_scope=scope_name)
"""
def __init__(self, name: str, scope_name: str):
super().__init__(name)
self._scope_name = scope_name
@property
def scope_name(self) -> str:
"""
Returns:
str: The current scope name.
"""
return self._scope_name
@classmethod
def get_current_instance(cls) -> Optional['DefaultScope']:
"""Get latest created default scope.
Since ``default_scope`` is an optional argument for ``Registry.build``,
``get_current_instance`` should return ``None`` if no ``DefaultScope``
has been created.
Examples:
>>> default_scope = DefaultScope.get_current_instance()
>>> # No `DefaultScope` has been created yet, so
>>> # `get_current_instance` returns `None`.
>>> default_scope = DefaultScope.get_instance(
>>> 'instance_name', scope_name='mmengine')
>>> default_scope.scope_name
mmengine
>>> default_scope = DefaultScope.get_current_instance()
>>> default_scope.scope_name
mmengine
Returns:
Optional[DefaultScope]: None if no ``DefaultScope`` instance has been
created yet; otherwise the latest created ``DefaultScope`` instance.
"""
_accquire_lock()
if cls._instance_dict:
instance = super(DefaultScope, cls).get_current_instance()
else:
instance = None
_release_lock()
return instance
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama, Dialog
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
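# A minimal usage sketch for these exports (illustrative; the checkpoint directory and
# tokenizer path below are placeholders, and the calls follow the reference repo's
# example scripts):
#
# generator = Llama.build(
#     ckpt_dir="llama-2-7b/",            # placeholder: path to downloaded weights
#     tokenizer_path="tokenizer.model",  # placeholder: path to the tokenizer model
#     max_seq_len=128,
#     max_batch_size=4,
# )
# results = generator.text_completion(["The theory of relativity states that"], max_gen_len=32)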
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataSample]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
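# A minimal configuration sketch (an assumption based on the standard MMEngine hook
# workflow, not part of this module): because EmptyCacheHook is registered in HOOKS,
# it is typically enabled through the runner config rather than instantiated directly:
#
# custom_hooks = [
#     dict(type='EmptyCacheHook', before_epoch=False, after_epoch=True, after_iter=False),
# ]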
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._before_epoch = before_epoch
self._after_epoch = after_epoch
self._after_iter = after_iter
def after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs:
Optional[Union[dict, Sequence[BaseDataSample]]] = None)\
-> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
"""
if self._after_iter:
torch.cuda.empty_cache()
def before_epoch(self, runner) -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self._before_epoch:
torch.cuda.empty_cache()
def after_epoch(self, runner) -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self._after_epoch:
torch.cuda.empty_cache()
|
from typing import TYPE_CHECKING, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be a remote (web) URL or a local file path.
"""
def load(self: T) -> 'VerticesAndFaces':
"""
Load the data from the url into a VerticesAndFaces object containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDocument):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
mesh = self._load_trimesh_instance(force='mesh')
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
from IPython.display import display
mesh = self._load_trimesh_instance()
display(mesh.show())
|
from typing import NamedTuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='Mesh3DUrl')
class Mesh3DLoadResult(NamedTuple):
vertices: NdArray
faces: NdArray
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
Can be a remote (web) URL or a local file path.
"""
def load(self: T) -> Mesh3DLoadResult:
"""
Load the data from the url into a named tuple of two NdArrays containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import Mesh3DUrl
class MyDoc(BaseDocument):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
vertices, faces = doc.mesh_url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
:return: named tuple of two NdArrays representing the mesh's vertices and faces
"""
mesh = self._load_trimesh_instance(force='mesh')
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return Mesh3DLoadResult(vertices=vertices, faces=faces)
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
try:
import jax.numpy as jnp # type: ignore # noqa: F401
except (ImportError, TypeError):
jnp_imported = False
else:
jnp_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
'redis': '"docarray[redis]"',
'pymilvus': '"docarray[milvus]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_jax_available():
return jnp_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False  # reached when the item has no dtype/ndim (e.g. a plain Python int)
def is_notebook() -> bool:
"""
Check if we're running in a Jupyter notebook, using the magic function
`get_ipython` that is only available in IPython environments.
:return: True if running in a Jupyter notebook, else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
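# A minimal usage sketch for import_library (illustrative; 'trimesh' is just one of the
# packages listed in INSTALL_INSTRUCTIONS above):
#
# trimesh = import_library('trimesh', raise_error=False)
# if trimesh is None:
#     print('trimesh is missing; install it with: pip install "docarray[mesh]"')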
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
try:
import jax.numpy as jnp # type: ignore # noqa: F401
except (ImportError, TypeError):
jnp_imported = False
else:
jnp_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
'redis': '"docarray[redis]"',
'pymilvus': '"docarray[milvus]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_jax_available():
return jnp_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False  # reached when the item has no dtype/ndim (e.g. a plain Python int)
def is_notebook() -> bool:
"""
Check if we're running in a Jupyter notebook, using the magic function
`get_ipython` that is only available in IPython environments.
:return: True if running in a Jupyter notebook, else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.17.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
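# A minimal usage sketch for the public API re-exported above (illustrative; "ag_news"
# is an arbitrary example dataset name, and this would live in user code, not here):
#
# from datasets import load_dataset
# ds = load_dataset("ag_news", split="train")
# print(ds[0])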
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.17.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine.registry import MODEL_WRAPPERS
def mock(*args, **kwargs):
pass
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_model_wrapper():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
# _verify_model_across_ranks was added in torch 1.9.0, so we should check
# whether it is a member of torch.distributed before mocking it
if hasattr(torch.distributed, '_verify_model_across_ranks'):
torch.distributed._verify_model_across_ranks = mock
model = Model()
assert not is_model_wrapper(model)
mmdp = MMDataParallel(model)
assert is_model_wrapper(mmdp)
mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(mmddp)
torch_dp = DataParallel(model)
assert is_model_wrapper(torch_dp)
torch_ddp = DistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(torch_ddp)
# test model wrapper registry
@MODEL_WRAPPERS.register_module()
class ModelWrapper(object):
def __init__(self, module):
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
model_wrapper = ModelWrapper(model)
assert is_model_wrapper(model_wrapper)
class TestMMDataParallel(TestCase):
def setUp(self):
"""Setup the demo image in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
def train_step(self, x):
return self.forward(x)
def val_step(self, x):
return self.forward(x)
self.model = Model()
def test_train_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without train_step attribute
with pytest.raises(AssertionError):
mmdp.train_step(torch.zeros([1, 1, 3, 3]))
out = self.model.train_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
def test_val_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without val_step attribute
with pytest.raises(AssertionError):
mmdp.val_step(torch.zeros([1, 1, 3, 3]))
out = self.model.val_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine.registry import MODEL_WRAPPERS
def mock(*args, **kwargs):
pass
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_model_wrapper():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
# _verify_model_across_ranks was added in torch 1.9.0, so we should check
# whether it is a member of torch.distributed before mocking it
if hasattr(torch.distributed, '_verify_model_across_ranks'):
torch.distributed._verify_model_across_ranks = mock
model = Model()
assert not is_model_wrapper(model)
mmdp = MMDataParallel(model)
assert is_model_wrapper(mmdp)
mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(mmddp)
# test model wrapper registry
@MODEL_WRAPPERS.register_module()
class ModelWrapper(object):
def __init__(self, module):
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
model_wrapper = ModelWrapper(model)
assert is_model_wrapper(model_wrapper)
class TestMMDataParallel(TestCase):
def setUp(self):
"""Setup the demo image in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
def train_step(self, x):
return self.forward(x)
def val_step(self, x):
return self.forward(x)
self.model = Model()
def test_train_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without train_step attribute
with pytest.raises(AssertionError):
mmdp.train_step(torch.zeros([1, 1, 3, 3]))
out = self.model.train_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
def test_val_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without val_step attribute
with pytest.raises(AssertionError):
mmdp.val_step(torch.zeros([1, 1, 3, 3]))
out = self.model.val_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
|
from typing import Any
import torch
__all__ = [
"LSTM",
]
class LSTM(torch.ao.nn.quantizable.LSTM):
r"""A quantized long short-term memory (LSTM).
For the description and the argument types, please refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
.. note::
To access the weights and biases, you need to access them per layer.
See examples in :class:`~torch.ao.nn.quantizable.LSTM`
Examples::
>>> # xdoctest: +SKIP
>>> custom_module_config = {
... 'float_to_observed_custom_module_class': {
... nn.LSTM: nn.quantizable.LSTM,
... },
... 'observed_to_quantized_custom_module_class': {
... nn.quantizable.LSTM: nn.quantized.LSTM,
... }
... }
>>> tq.prepare(model, prepare_custom_module_class=custom_module_config)
>>> tq.convert(model, convert_custom_module_class=custom_module_config)
"""
_FLOAT_MODULE = torch.ao.nn.quantizable.LSTM # type: ignore[assignment]
def _get_name(self) -> str:
return "QuantizedLSTM"
@classmethod
def from_float(cls, *args: Any, **kwargs: Any) -> None:
# The whole flow is float -> observed -> quantized
# This class does observed -> quantized only
raise NotImplementedError(
"It looks like you are trying to convert a "
"non-observed LSTM module. Please, see "
"the examples on quantizable LSTMs."
)
@classmethod
def from_observed(cls: type["LSTM"], other: torch.ao.nn.quantizable.LSTM) -> "LSTM":
assert isinstance(other, cls._FLOAT_MODULE) # type: ignore[has-type]
converted = torch.ao.quantization.convert(
other, inplace=False, remove_qconfig=True
)
converted.__class__ = cls
return converted
|
# mypy: allow-untyped-defs
import torch
__all__ = [
"LSTM",
]
class LSTM(torch.ao.nn.quantizable.LSTM):
r"""A quantized long short-term memory (LSTM).
For the description and the argument types, please refer to :class:`~torch.nn.LSTM`
Attributes:
layers : instances of the `_LSTMLayer`
.. note::
To access the weights and biases, you need to access them per layer.
See examples in :class:`~torch.ao.nn.quantizable.LSTM`
Examples::
>>> # xdoctest: +SKIP
>>> custom_module_config = {
... 'float_to_observed_custom_module_class': {
... nn.LSTM: nn.quantizable.LSTM,
... },
... 'observed_to_quantized_custom_module_class': {
... nn.quantizable.LSTM: nn.quantized.LSTM,
... }
... }
>>> tq.prepare(model, prepare_custom_module_class=custom_module_config)
>>> tq.convert(model, convert_custom_module_class=custom_module_config)
"""
_FLOAT_MODULE = torch.ao.nn.quantizable.LSTM # type: ignore[assignment]
def _get_name(self):
return "QuantizedLSTM"
@classmethod
def from_float(cls, *args, **kwargs):
# The whole flow is float -> observed -> quantized
# This class does observed -> quantized only
raise NotImplementedError(
"It looks like you are trying to convert a "
"non-observed LSTM module. Please, see "
"the examples on quantizable LSTMs."
)
@classmethod
def from_observed(cls, other):
assert isinstance(other, cls._FLOAT_MODULE) # type: ignore[has-type]
converted = torch.ao.quantization.convert(
other, inplace=False, remove_qconfig=True
)
converted.__class__ = cls
return converted
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmpretrain
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
imports=['mmpretrain.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmpretrain.TIMMBackbone',
model_name='tv_resnet50', # ResNet-50 with torchvision weights
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='tv_resnet50', # ResNet-50 with torchvision weights
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path
import pytest
from mmengine.config.utils import (_get_external_cfg_base_path,
_get_package_and_cfg_path)
def test_get_external_cfg_base_path(tmp_path):
package_path = tmp_path
rel_cfg_path = os.path.join('cfg_dir', 'cfg_file')
with pytest.raises(FileNotFoundError):
_get_external_cfg_base_path(str(package_path), rel_cfg_path)
cfg_dir = tmp_path / '.mim' / 'configs' / 'cfg_dir'
cfg_dir.mkdir(parents=True, exist_ok=True)
(cfg_dir / 'cfg_file').touch()
cfg_path = _get_external_cfg_base_path(str(package_path), rel_cfg_path)
assert cfg_path == f'{os.path.join(str(cfg_dir), "cfg_file")}'
def test_get_external_cfg_path():
external_cfg_path = 'mmdet::path/cfg'
package, rel_cfg_path = _get_package_and_cfg_path(external_cfg_path)
assert package == 'mmdet'
assert rel_cfg_path == 'path/cfg'
# external config must contain `::`.
external_cfg_path = 'path/cfg'
with pytest.raises(ValueError):
_get_package_and_cfg_path(external_cfg_path)
# Using `:::` as the separator will raise an error.
external_cfg_path = 'mmdet:::path/cfg'
with pytest.raises(ValueError):
_get_package_and_cfg_path(external_cfg_path)
# Using `:` as the separator will raise an error.
external_cfg_path = 'mmdet:path/cfg'
with pytest.raises(ValueError):
_get_package_and_cfg_path(external_cfg_path)
# Too many `::` separators will raise an error.
external_cfg_path = 'mmdet::path/cfg::error'
with pytest.raises(ValueError):
_get_package_and_cfg_path(external_cfg_path)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path
import pytest
from mmengine.config.utils import (_get_external_cfg_base_path,
_get_package_and_cfg_path)
def test_get_external_cfg_base_path(tmp_path):
package_path = tmp_path
rel_cfg_path = 'cfg_dir/cfg_file'
with pytest.raises(FileNotFoundError):
_get_external_cfg_base_path(str(package_path), rel_cfg_path)
cfg_dir = tmp_path / '.mim' / 'configs' / 'cfg_dir'
cfg_dir.mkdir(parents=True, exist_ok=True)
(cfg_dir / 'cfg_file').touch()
cfg_path = _get_external_cfg_base_path(str(package_path), rel_cfg_path)
assert cfg_path == f'{os.path.join(str(cfg_dir), "cfg_file")}'
def test_get_external_cfg_path():
external_cfg_path = 'mmdet::path/cfg'
package, rel_cfg_path = _get_package_and_cfg_path(external_cfg_path)
assert package == 'mmdet'
assert rel_cfg_path == 'path/cfg'
# external config must contain `::`.
external_cfg_path = 'path/cfg'
with pytest.raises(ValueError):
_get_package_and_cfg_path(external_cfg_path)
# Using `:::` as the separator will raise an error.
external_cfg_path = 'mmdet:::path/cfg'
with pytest.raises(ValueError):
_get_package_and_cfg_path(external_cfg_path)
# Using `:` as the separator will raise an error.
external_cfg_path = 'mmdet:path/cfg'
with pytest.raises(ValueError):
_get_package_and_cfg_path(external_cfg_path)
# Too many `::` separators will raise an error.
external_cfg_path = 'mmdet::path/cfg::error'
with pytest.raises(ValueError):
_get_package_and_cfg_path(external_cfg_path)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
def basic_encoder() -> LaserEncoder:
return LaserEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_language == 'en'
def test_no_document(basic_encoder: LaserEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
@pytest.mark.gpu
def test_encoding_gpu():
encoder = LaserEncoder(device='cuda')
docs = DocumentArray([Document(text='random text')])
encoder.encode(docs, {})
assert len(docs.get_attributes('embedding')) == 1
assert docs[0].embedding.shape == (1024,)
def test_no_text_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu(basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'language, sentence',
[
('en', 'Today is a nice day'),
('es', 'hoy es un buen día'),
('ru', 'сегодня хороший день'),
],
)
def test_languages(language: str, sentence: str, basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text=sentence)])
basic_encoder.encode(docs, {'language': language})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: LaserEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
def test_no_documents():
encoder = LaserEncoder()
docs = []
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert not docs
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: LaserEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: LaserEncoder):
docs = DocumentArray(
[
# Different from the usual example, because these embeddings are poor (manually
# verified using the laserembeddings module)
Document(id='A', text='car'),
Document(id='B', text='truck'),
Document(id='C', text='radio'),
Document(id='D', text='TV'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
def basic_encoder() -> LaserEncoder:
return LaserEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_language == 'en'
def test_no_document(basic_encoder: LaserEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu(basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'language, sentence',
[
('en', 'Today is a nice day'),
('es', 'hoy es un buen día'),
('ru', 'сегодня хороший день'),
],
)
def test_languages(language: str, sentence: str, basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text=sentence)])
basic_encoder.encode(docs, {'language': language})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: LaserEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: LaserEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: LaserEncoder):
docs = DocumentArray(
[
# Different from the usual example, because these embeddings are poor (manually
# verified using the laserembeddings module)
Document(id='A', text='car'),
Document(id='B', text='truck'),
Document(id='C', text='radio'),
Document(id='D', text='TV'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_BinaryClassificationEvaluator_find_best_f1_and_threshold() -> None:
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
best_f1,
best_precision,
best_recall,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_BinaryClassificationEvaluator_find_best_accuracy_and_threshold() -> None:
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
max_acc,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
acc = evaluator(model)
assert acc > 0.2
def test_ParaphraseMiningEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the ParaphraseMiningEvaluator can be loaded"""
model = paraphrase_distilroberta_base_v1_model
sentences = {
0: "Hello World",
1: "Hello World!",
2: "The cat is on the table",
3: "On the table the cat is",
}
data_eval = evaluation.ParaphraseMiningEvaluator(sentences, [(0, 1), (2, 3)])
score = data_eval(model)
assert score > 0.99
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_BinaryClassificationEvaluator_find_best_f1_and_threshold() -> None:
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
best_f1,
best_precision,
best_recall,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_BinaryClassificationEvaluator_find_best_accuracy_and_threshold() -> None:
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
max_acc,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
acc = evaluator(model)
assert acc > 0.2
def test_ParaphraseMiningEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the ParaphraseMiningEvaluator can be loaded"""
model = paraphrase_distilroberta_base_v1_model
sentences = {
0: "Hello World",
1: "Hello World!",
2: "The cat is on the table",
3: "On the table the cat is",
}
data_eval = evaluation.ParaphraseMiningEvaluator(sentences, [(0, 1), (2, 3)])
score = data_eval(model)
assert score > 0.99
|
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
import pathlib
from typing import Any, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import OneHotLabel
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "semeion"
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(categories=[str(i) for i in range(10)])
@register_dataset(NAME)
class SEMEION(Dataset):
"""Semeion dataset
homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> list[OnlineResource]:
data = HttpResource(
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
)
return [data]
def _prepare_sample(self, data: tuple[str, ...]) -> dict[str, Any]:
image_data, label_data = data[:256], data[256:-1]
return dict(
image=Image(torch.tensor([float(pixel) for pixel in image_data], dtype=torch.float).reshape(16, 16)),
label=OneHotLabel([int(label) for label in label_data], categories=self._categories),
)
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
dp = resource_dps[0]
dp = CSVParser(dp, delimiter=" ")
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 1_593
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import OneHotLabel
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "semeion"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(i) for i in range(10)])
@register_dataset(NAME)
class SEMEION(Dataset):
"""Semeion dataset
homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
)
return [data]
def _prepare_sample(self, data: Tuple[str, ...]) -> Dict[str, Any]:
image_data, label_data = data[:256], data[256:-1]
return dict(
image=Image(torch.tensor([float(pixel) for pixel in image_data], dtype=torch.float).reshape(16, 16)),
label=OneHotLabel([int(label) for label in label_data], categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVParser(dp, delimiter=" ")
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 1_593
|
__version__ = '0.13.30'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.29'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
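# A minimal round-trip sketch with the helpers re-exported above (illustrative, not part
# of this module): convert_to_tensor places the data on the default device, and
# convert_to_numpy moves the tensor back to CPU before converting it.
#
# from keras.src.backend.torch.core import convert_to_numpy, convert_to_tensor
# x = convert_to_tensor([1.0, 2.0, 3.0])
# print(convert_to_numpy(x))  # a NumPy array on CPU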
|
"""Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
|
from parameterized import parameterized
from torchaudio.io import AudioEffector
from torchaudio_unittest.common_utils import get_sinusoid, skipIfNoFFmpeg, TorchaudioTestCase
from .common import lt42
@skipIfNoFFmpeg
class EffectorTest(TorchaudioTestCase):
def test_null(self):
"""No effect and codec will return the same result"""
sample_rate = 8000
frames_per_chunk = 256
effector = AudioEffector(effect=None, format=None)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
# one-go
output = effector.apply(original, sample_rate)
self.assertEqual(original, output)
# streaming
for i, chunk in enumerate(effector.stream(original, sample_rate, frames_per_chunk)):
start = i * frames_per_chunk
end = (i + 1) * frames_per_chunk
self.assertEqual(original[start:end, :], chunk)
@parameterized.expand(
[
("ogg", "flac"), # flac only supports s16 and s32
("ogg", "opus"), # opus only supports 48k Hz
("ogg", "vorbis"), # vorbis only supports stereo
# ("ogg", "vorbis", 44100),
            # this fails with a small discrepancy: 441024 vs 441000
# TODO: investigate
("wav", None),
("wav", "pcm_u8"),
("mp3", None),
("mulaw", None, 44100), # mulaw is encoded without header
]
)
def test_formats(self, format, encoder, sample_rate=8000):
"""Formats (some with restrictions) just work without an issue in effector"""
effector = AudioEffector(format=format, encoder=encoder)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate)
# On 4.1 OPUS produces 8020 samples (extra 20)
# this has been fixed on 4.2+
if encoder == "opus" and lt42():
return
self.assertEqual(original.shape, output.shape)
        # Note
        # MP3 adds padding which cannot be removed when the encoded data is written to
        # a file-like object without a seek method.
        # The number of padding samples is retrievable as `AVCodecContext::initial_padding`
        # https://ffmpeg.org/doxygen/4.1/structAVCodecContext.html#a8f95550ce04f236e9915516d04d3d1ab
        # but it is not exposed yet.
        # These "priming" samples have negative timestamps, so we could also add logic
        # to discard them at decoding time. However, as far as I checked, when data is
        # loaded with StreamReader, the timestamps are reset. I tried options such as
        # avoid_negative_ts (https://ffmpeg.org/ffmpeg-formats.html), but they made no
        # difference. Perhaps this is because the negative-timestamp information is
        # only available on the encoding side and is presumably written to the header,
        # which somehow does not happen with a file-like object.
        # More investigation is needed to remove the MP3 padding.
if format == "mp3":
return
for chunk in effector.stream(original, sample_rate, frames_per_chunk=original.size(0)):
self.assertEqual(original.shape, chunk.shape)
@parameterized.expand([("loudnorm=I=-16:LRA=11:TP=-1.5",), ("volume=2",)])
def test_effect(self, effect):
sample_rate = 8000
effector = AudioEffector(effect=effect)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate)
self.assertEqual(original.shape, output.shape)
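# A minimal standalone sketch of the AudioEffector API exercised above,
# assuming torchaudio is built with FFmpeg support; the (frames, channels)
# layout and the "volume=2" filter string follow the tests in this file.
def _effector_demo():
    import torch
    from torchaudio.io import AudioEffector
    waveform = torch.zeros(8000, 2)  # one second of stereo silence, channels last
    effector = AudioEffector(effect="volume=2")
    return effector.apply(waveform, 8000)  # same shape, filter applied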
|
from parameterized import parameterized
from torchaudio.io import AudioEffector
from torchaudio_unittest.common_utils import get_sinusoid, skipIfNoFFmpeg, TorchaudioTestCase
from .common import lt42
@skipIfNoFFmpeg
class EffectorTest(TorchaudioTestCase):
def test_null(self):
"""No effect and codec will return the same result"""
sample_rate = 8000
frames_per_chunk = 256
effector = AudioEffector(effect=None, format=None)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
# one-go
output = effector.apply(original, sample_rate)
self.assertEqual(original, output)
# streaming
for i, chunk in enumerate(effector.stream(original, sample_rate, frames_per_chunk)):
start = i * frames_per_chunk
end = (i + 1) * frames_per_chunk
self.assertEqual(original[start:end, :], chunk)
@parameterized.expand(
[
("ogg", "flac"), # flac only supports s16 and s32
("ogg", "opus"), # opus only supports 48k Hz
("ogg", "vorbis"), # vorbis only supports stereo
("wav", None),
("wav", "pcm_u8"),
("mp3", None),
]
)
def test_formats(self, format, encoder):
"""Formats (some with restrictions) just work without an issue in effector"""
sample_rate = 8000
effector = AudioEffector(format=format, encoder=encoder)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate)
# On 4.1 OPUS produces 8020 samples (extra 20)
# this has been fixed on 4.2+
if encoder == "opus" and lt42():
return
self.assertEqual(original.shape, output.shape)
        # Note
        # MP3 adds padding which cannot be removed when the encoded data is written to
        # a file-like object without a seek method.
        # The number of padding samples is retrievable as `AVCodecContext::initial_padding`
        # https://ffmpeg.org/doxygen/4.1/structAVCodecContext.html#a8f95550ce04f236e9915516d04d3d1ab
        # but it is not exposed yet.
        # These "priming" samples have negative timestamps, so we could also add logic
        # to discard them at decoding time. However, as far as I checked, when data is
        # loaded with StreamReader, the timestamps are reset. I tried options such as
        # avoid_negative_ts (https://ffmpeg.org/ffmpeg-formats.html), but they made no
        # difference. Perhaps this is because the negative-timestamp information is
        # only available on the encoding side and is presumably written to the header,
        # which somehow does not happen with a file-like object.
        # More investigation is needed to remove the MP3 padding.
if format == "mp3":
return
for chunk in effector.stream(original, sample_rate, frames_per_chunk=original.size(0)):
self.assertEqual(original.shape, chunk.shape)
@parameterized.expand([("loudnorm=I=-16:LRA=11:TP=-1.5",), ("volume=2",)])
def test_effect(self, effect):
sample_rate = 8000
effector = AudioEffector(effect=effect)
original = get_sinusoid(n_channels=3, sample_rate=sample_rate, channels_first=False)
output = effector.apply(original, sample_rate)
self.assertEqual(original.shape, output.shape)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xce\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12(\n\x06tensor\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x0c\n\x02id\x18\t \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\n \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start=58
_DENSENDARRAYPROTO._serialized_end=123
_NDARRAYPROTO._serialized_start=125
_NDARRAYPROTO._serialized_end=228
_NODEPROTO._serialized_start=231
_NODEPROTO._serialized_end=565
_DOCUMENTPROTO._serialized_start=568
_DOCUMENTPROTO._serialized_end=698
_DOCUMENTPROTO_DATAENTRY._serialized_start=634
_DOCUMENTPROTO_DATAENTRY._serialized_end=698
_DOCUMENTARRAYPROTO._serialized_start=700
_DOCUMENTARRAYPROTO._serialized_end=759
# @@protoc_insertion_point(module_scope)
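# A minimal sketch (appended for illustration, not generated) of constructing
# the messages built above, assuming this module is importable as docarray_pb2;
# field names come from the serialized descriptor (DenseNdArrayProto has
# buffer/shape/dtype, NodeProto carries a `content` oneof).
def _proto_demo():
    nd = DenseNdArrayProto(buffer=b'', shape=[3, 224, 224], dtype='float32')
    node = NodeProto(tensor=NdArrayProto(dense=nd))
    return node.WhichOneof('content')  # -> 'tensor'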
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x9e\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12(\n\x06tensor\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x0c\n\x02id\x18\t \x01(\tH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 517
_DOCUMENTPROTO._serialized_start = 520
_DOCUMENTPROTO._serialized_end = 650
_DOCUMENTPROTO_DATAENTRY._serialized_start = 586
_DOCUMENTPROTO_DATAENTRY._serialized_end = 650
_DOCUMENTARRAYPROTO._serialized_start = 652
_DOCUMENTARRAYPROTO._serialized_end = 711
# @@protoc_insertion_point(module_scope)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
    BFP takes multi-level features as inputs, gathers them into a single one,
    then refines the gathered feature and scatters the refined results back to
    the multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
        refine_type (str): Type of the refine op; currently supported values are
[None, 'conv', 'non_local'].
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(BFP, self).__init__(init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
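# A minimal sketch of how BFP consumes and emits multi-level features,
# assuming mmdet/mmcv are installed; the five levels with stride-2 gaps
# mirror a typical FPN and are illustrative only.
def _bfp_demo():
    import torch
    bfp = BFP(in_channels=8, num_levels=5, refine_level=2, refine_type=None)
    inputs = [torch.rand(1, 8, 64 // 2**i, 64 // 2**i) for i in range(5)]
    outs = bfp(inputs)
    return [o.shape for o in outs]  # one output per level, same shapes as the inputs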
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmcv.runner import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
    BFP takes multi-level features as inputs, gathers them into a single one,
    then refines the gathered feature and scatters the refined results back to
    the multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
        refine_type (str): Type of the refine op; currently supported values are
[None, 'conv', 'non_local'].
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(BFP, self).__init__(init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'setup_multi_processes'
]
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
sparse_plus,
silu,
gelu,
glu,
tanh,
tanh_shrink,
threshold,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
sparsemax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
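# A minimal sketch of the resolution logic above, assuming a standard Keras
# install; `get` accepts None, a name, a config dict, or a callable, and
# builtins round-trip through `serialize`/`deserialize` by name.
def _activation_lookup_demo():
    fn = get("relu")  # string identifier -> builtin function
    cfg = serialize(fn)  # builtins serialize to their plain name, e.g. "relu"
    return deserialize(cfg) is fn and get(None) is linear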
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
squareplus,
soft_shrink,
sparse_plus,
silu,
gelu,
glu,
tanh,
tanh_shrink,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
sparsemax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, objectnesses = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_pos_loss = empty_gt_losses['loss_pos']
empty_neg_loss = empty_gt_losses['loss_neg']
empty_center_loss = empty_gt_losses['loss_center']
    assert empty_neg_loss.item() > 0, 'negative (cls) loss should be non-zero'
    assert empty_pos_loss.item() == 0, (
        'there should be no positive loss when there are no true boxes')
    assert empty_center_loss.item() == 0, (
        'there should be no center loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_pos_loss = one_gt_losses['loss_pos']
onegt_neg_loss = one_gt_losses['loss_neg']
onegt_center_loss = one_gt_losses['loss_center']
    assert onegt_pos_loss.item() > 0, 'positive loss should be non-zero'
    assert onegt_neg_loss.item() > 0, 'negative (cls) loss should be non-zero'
    assert onegt_center_loss.item() > 0, 'center loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, objectnesses = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_pos_loss = empty_gt_losses['loss_pos']
empty_neg_loss = empty_gt_losses['loss_neg']
empty_center_loss = empty_gt_losses['loss_center']
    assert empty_neg_loss.item() > 0, 'negative (cls) loss should be non-zero'
    assert empty_pos_loss.item() == 0, (
        'there should be no positive loss when there are no true boxes')
    assert empty_center_loss.item() == 0, (
        'there should be no center loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_pos_loss = one_gt_losses['loss_pos']
onegt_neg_loss = one_gt_losses['loss_neg']
onegt_center_loss = one_gt_losses['loss_center']
    assert onegt_pos_loss.item() > 0, 'positive loss should be non-zero'
    assert onegt_neg_loss.item() > 0, 'negative (cls) loss should be non-zero'
    assert onegt_center_loss.item() > 0, 'center loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
mlvl_anchors = [torch.ones(5 * 5, 2)]
img_shape = None
scale_factor = [0.5, 0.5]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self._get_bboxes(
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=rescale)
|
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
self,
path_vectorizer: Optional[str] = None,
batch_size: int = 2048,
traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
"""
        :param path_vectorizer: path to the pre-trained sklearn tf-idf vectorizer
        :param traversal_paths: fallback traversal path in case no traversal path is sent in the request
        :param batch_size: fallback batch size in case no batch size is sent in the request
"""
super().__init__(*args, **kwargs)
if path_vectorizer is None:
path_vectorizer = str(
Path(__file__).parent / 'model/tfidf_vectorizer.pickle'
)
self.path_vectorizer = path_vectorizer
self.batch_size = batch_size
self.traversal_paths = traversal_paths
if os.path.exists(self.path_vectorizer):
            with open(self.path_vectorizer, 'rb') as f:
                self.tfidf_vectorizer = pickle.load(f)
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Generate the TF-IDF feature vector for all text documents.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
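# A minimal sketch of driving the encoder above, assuming a fitted sklearn
# TfidfVectorizer has been pickled at the default model path; the Flow-less
# direct call is for illustration only.
def _tfidf_encode_demo():
    from jina import Document, DocumentArray
    encoder = TFIDFTextEncoder()
    docs = DocumentArray([Document(text='hello world')])
    encoder.encode(docs=docs, parameters={})
    return docs[0].embedding  # a sparse tf-idf row vector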
|
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
self,
path_vectorizer: Optional[str] = None,
default_batch_size: int = 2048,
default_traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
"""
        :param path_vectorizer: path to the pre-trained sklearn tf-idf vectorizer
        :param default_traversal_paths: fallback traversal path in case no traversal path is sent in the request
        :param default_batch_size: fallback batch size in case no batch size is sent in the request
"""
super().__init__(*args, **kwargs)
if path_vectorizer is None:
path_vectorizer = str(
Path(__file__).parent / 'model/tfidf_vectorizer.pickle'
)
self.path_vectorizer = path_vectorizer
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
if os.path.exists(self.path_vectorizer):
            with open(self.path_vectorizer, 'rb') as f:
                self.tfidf_vectorizer = pickle.load(f)
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Generate the TF-IDF feature vector for all text documents.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, List
import numpy as np
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class NumpySearcher(Executor):
def __init__(
self,
dump_path: str = None,
default_top_k: int = 5,
default_traversal_paths: List[str] = ['r'],
metric: str = 'cosine',
is_distance: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.default_traversal_paths = default_traversal_paths
self.is_distance = is_distance
self.metric = metric
self.dump_path = dump_path or kwargs.get('runtime_args').get('dump_path')
self.logger = get_logger(self)
self.default_top_k = default_top_k
if self.dump_path is not None:
self.logger.info(f'Importing data from {self.dump_path}')
ids, vecs = import_vectors(self.dump_path, str(self.runtime_args.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
self._ids_to_idx = {}
self.logger.info(f'Imported {len(self._ids)} documents.')
else:
self.logger.warning(
f'No dump_path provided for {self.__class__.__name__}. Use flow.rolling_update()...'
)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
if not hasattr(self, '_vecs') or not self._vecs.size:
self.logger.warning('Searching an empty index')
return
        parameters = parameters or {}
        top_k = int(parameters.get('top_k', self.default_top_k))
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
doc_embeddings = docs.traverse_flat(traversal_paths).get_attributes('embedding')
if not docs:
self.logger.info('No documents to search for')
return
if not doc_embeddings or doc_embeddings[0] is None:
self.logger.info('None of the docs have any embeddings')
return
doc_embeddings = np.stack(doc_embeddings)
q_emb = _ext_A(_norm(doc_embeddings))
d_emb = _ext_B(_norm(self._vecs))
if self.metric == 'cosine':
dists = _cosine(q_emb, d_emb)
elif self.metric == 'euclidean':
dists = _euclidean(q_emb, d_emb)
else:
self.logger.error(f'Metric {self.metric} not supported.')
positions, dist = self._get_sorted_top_k(dists, top_k)
for _q, _positions, _dists in zip(docs.traverse_flat(traversal_paths), positions, dist):
for position, dist in zip(_positions, _dists):
d = Document(id=self._ids[position], embedding=self._vecs[position])
if self.is_distance:
d.scores[self.metric] = dist
else:
if self.metric == 'cosine':
d.scores[self.metric] = 1 - dist
elif self.metric == 'euclidean':
d.scores[self.metric] = 1 / (1 + dist)
_q.matches.append(d)
@staticmethod
def _get_sorted_top_k(
dist: 'np.array', top_k: int
) -> Tuple['np.ndarray', 'np.ndarray']:
if top_k >= dist.shape[1]:
idx = dist.argsort(axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx, axis=1)
else:
idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx_ps, axis=1)
idx_fs = dist.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
dist = np.take_along_axis(dist, idx_fs, axis=1)
return idx, dist
def _get_ones(x, y):
return np.ones((x, y))
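# The helpers below compute squared euclidean distances with a single matrix
# product: stacking A_ext = [1, A, A**2] against B_ext = [B**2, -2*B, 1]
# along the feature axis makes A_ext @ B_ext expand to
# sum(B**2) - 2*A@B.T + sum(A**2) = ||a - b||**2 for every query/doc pair.
# For L2-normalized inputs the same product equals 2 - 2*cos(a, b), so
# `_cosine` halves the clipped result to obtain the distance 1 - cos(a, b).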
def _ext_A(A):
nA, dim = A.shape
A_ext = _get_ones(nA, dim * 3)
A_ext[:, dim: 2 * dim] = A
A_ext[:, 2 * dim:] = A ** 2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = _get_ones(dim * 3, nB)
B_ext[:dim] = (B ** 2).T
B_ext[dim: 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, List
import numpy as np
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class NumpySearcher(Executor):
def __init__(
self,
dump_path: str = None,
default_top_k: int = 5,
default_traversal_paths: List[str] = ['r'],
metric: str = 'cosine',
is_distance: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.default_traversal_paths = default_traversal_paths
self.is_distance = is_distance
self.metric = metric
self.dump_path = dump_path or kwargs.get('runtime_args').get('dump_path')
self.logger = get_logger(self)
self.default_top_k = default_top_k
if self.dump_path is not None:
self.logger.info(f'Importing data from {self.dump_path}')
ids, vecs = import_vectors(self.dump_path, str(self.runtime_args.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
self._ids_to_idx = {}
self.logger.info(f'Imported {len(self._ids)} documents.')
else:
self.logger.warning(
f'No dump_path provided for {self.__class__.__name__}. Use flow.rolling_update()...'
)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
if not hasattr(self, '_vecs') or not self._vecs.size:
self.logger.warning('Searching an empty index')
return
        parameters = parameters or {}
        top_k = int(parameters.get('top_k', self.default_top_k))
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
doc_embeddings = docs.traverse_flat(traversal_paths).get_attributes('embedding')
if not docs:
self.logger.info('No documents to search for')
return
if not doc_embeddings:
self.logger.info('None of the docs have any embeddings')
return
doc_embeddings = np.stack(doc_embeddings)
q_emb = _ext_A(_norm(doc_embeddings))
d_emb = _ext_B(_norm(self._vecs))
if self.metric == 'cosine':
dists = _cosine(q_emb, d_emb)
elif self.metric == 'euclidean':
dists = _euclidean(q_emb, d_emb)
else:
self.logger.error(f'Metric {self.metric} not supported.')
positions, dist = self._get_sorted_top_k(dists, top_k)
for _q, _positions, _dists in zip(docs, positions, dist):
for position, dist in zip(_positions, _dists):
d = Document(id=self._ids[position], embedding=self._vecs[position])
if self.is_distance:
d.scores[self.metric] = dist
else:
if self.metric == 'cosine':
d.scores[self.metric] = 1 - dist
elif self.metric == 'euclidean':
d.scores[self.metric] = 1 / (1 + dist)
_q.matches.append(d)
@staticmethod
def _get_sorted_top_k(
dist: 'np.array', top_k: int
) -> Tuple['np.ndarray', 'np.ndarray']:
if top_k >= dist.shape[1]:
idx = dist.argsort(axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx, axis=1)
else:
idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx_ps, axis=1)
idx_fs = dist.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
dist = np.take_along_axis(dist, idx_fs, axis=1)
return idx, dist
def _get_ones(x, y):
return np.ones((x, y))
def _ext_A(A):
nA, dim = A.shape
A_ext = _get_ones(nA, dim * 3)
A_ext[:, dim: 2 * dim] = A
A_ext[:, 2 * dim:] = A ** 2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = _get_ones(dim * 3, nB)
B_ext[:dim] = (B ** 2).T
B_ext[dim: 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
|
import fastapi
from .config import Settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fastapi.Depends(auth_middleware),
) -> User:
return verify_user(payload, admin_only=True)
def verify_user(payload: dict | None, admin_only: bool) -> User:
if not payload:
if Settings.ENABLE_AUTH:
raise fastapi.HTTPException(
status_code=401, detail="Authorization header is missing"
)
# This handles the case when authentication is disabled
payload = {"sub": DEFAULT_USER_ID, "role": "admin"}
user_id = payload.get("sub")
if not user_id:
raise fastapi.HTTPException(
status_code=401, detail="User ID not found in token"
)
if admin_only and payload["role"] != "admin":
raise fastapi.HTTPException(status_code=403, detail="Admin access required")
return User.from_payload(payload)
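# A minimal sketch of the paths through `verify_user` above, assuming
# Settings.ENABLE_AUTH is False so a missing payload falls back to the
# default admin identity; the payload dicts are illustrative.
def _verify_user_demo():
    admin = verify_user({"sub": "u1", "role": "admin"}, admin_only=True)
    anon = verify_user(None, admin_only=False)  # default user when auth is off
    return admin, anon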
|
import fastapi
from .middleware import auth_middleware
from .models import User, DEFAULT_USER_ID, DEFAULT_EMAIL
from .config import Settings
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fastapi.Depends(auth_middleware),
) -> User:
return verify_user(payload, admin_only=True)
def verify_user(payload: dict | None, admin_only: bool) -> User:
if not payload:
if Settings.ENABLE_AUTH:
raise fastapi.HTTPException(
status_code=401, detail="Authorization header is missing"
)
# This handles the case when authentication is disabled
payload = {"sub": DEFAULT_USER_ID, "role": "admin"}
user_id = payload.get("sub")
if not user_id:
raise fastapi.HTTPException(
status_code=401, detail="User ID not found in token"
)
if admin_only and payload["role"] != "admin":
raise fastapi.HTTPException(status_code=403, detail="Admin access required")
return User.from_payload(payload)
|
import numpy as np
import pytest
from typing import Dict, List
from docarray import DocList
from docarray.base_doc import AnyDoc, BaseDoc
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDoc):
text: str
tensor: NdArray
class CustomDoc(BaseDoc):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDoc(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
@pytest.mark.parametrize('protocol', ['proto', 'json'])
def test_any_document_from_to(protocol):
class InnerDoc(BaseDoc):
text: str
t: Dict[str, str]
class DocTest(BaseDoc):
text: str
tags: Dict[str, int]
l: List[int]
d: InnerDoc
ld: DocList[InnerDoc]
inner_doc = InnerDoc(text='I am inner', t={'a': 'b'})
da = DocList[DocTest](
[
DocTest(
text='type1',
tags={'type': 1},
l=[1, 2],
d=inner_doc,
ld=DocList[InnerDoc]([inner_doc]),
),
DocTest(
text='type2',
tags={'type': 2},
l=[1, 2],
d=inner_doc,
ld=DocList[InnerDoc]([inner_doc]),
),
]
)
from docarray.base_doc import AnyDoc
if protocol == 'proto':
aux = DocList[AnyDoc].from_protobuf(da.to_protobuf())
else:
aux = DocList[AnyDoc].from_json(da.to_json())
assert len(aux) == 2
assert len(aux.id) == 2
for i, d in enumerate(aux):
assert d.tags['type'] == i + 1
assert d.text == f'type{i + 1}'
assert d.l == [1, 2]
if protocol == 'proto':
assert isinstance(d.d, AnyDoc)
            assert d.d.text == 'I am inner'  # inner Document is an AnyDoc
assert d.d.t == {'a': 'b'}
else:
assert isinstance(d.d, dict)
assert d.d['text'] == 'I am inner' # inner Document is a Dict
assert d.d['t'] == {'a': 'b'}
assert len(d.ld) == 1
if protocol == 'proto':
assert isinstance(d.ld[0], AnyDoc)
assert d.ld[0].text == 'I am inner'
assert d.ld[0].t == {'a': 'b'}
else:
assert isinstance(d.ld[0], dict)
assert d.ld[0]['text'] == 'I am inner'
assert d.ld[0]['t'] == {'a': 'b'}
|
import numpy as np
from docarray.base_doc import AnyDoc, BaseDoc
from docarray.typing import NdArray
def test_any_doc():
class InnerDocument(BaseDoc):
text: str
tensor: NdArray
class CustomDoc(BaseDoc):
inner: InnerDocument
text: str
doc = CustomDoc(
text='bye', inner=InnerDocument(text='hello', tensor=np.zeros((3, 224, 224)))
)
any_doc = AnyDoc(**doc.__dict__)
assert any_doc.text == doc.text
assert any_doc.inner.text == doc.inner.text
assert (any_doc.inner.tensor == doc.inner.tensor).all()
|
from collections import Counter
from typing import Callable, List
from llama_index.core.bridge.pydantic import Field
from llama_index.core.base.embeddings.base_sparse import (
BaseSparseEmbedding,
SparseEmbedding,
)
def get_default_tokenizer() -> Callable:
"""
Get default tokenizer.
NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
"""
from transformers import BertTokenizerFast
orig_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
def _tokenizer(texts: List[str]) -> List[List[int]]:
return orig_tokenizer(texts, padding=True, truncation=True, max_length=512)[
"input_ids"
]
return _tokenizer
class DefaultPineconeSparseEmbedding(BaseSparseEmbedding):
"""Default Pinecone sparse embedding."""
tokenizer: Callable = Field(
default_factory=get_default_tokenizer,
description="A callable that returns token input ids.",
)
def build_sparse_embeddings(
self, input_batch: List[List[int]]
) -> List[SparseEmbedding]:
"""
Build a list of sparse dictionaries from a batch of input_ids.
NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
"""
# store a batch of sparse embeddings
sparse_emb_list = []
# iterate through input batch
for token_ids in input_batch:
sparse_emb = {}
# convert the input_ids list to a dictionary of key to frequency values
d = dict(Counter(token_ids))
for idx in d:
sparse_emb[idx] = float(d[idx])
sparse_emb_list.append(sparse_emb)
# return sparse_emb list
return sparse_emb_list
def _get_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query synchronously."""
token_ids = self.tokenizer([query])[0]
return self.build_sparse_embeddings([token_ids])[0]
async def _aget_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query asynchronously."""
return self._get_query_embedding(query)
def _get_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text synchronously."""
return self._get_query_embedding(text)
async def _aget_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text asynchronously."""
return self._get_query_embedding(text)
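# A minimal sketch of the token-frequency sparse embedding defined above,
# assuming the bert-base-uncased tokenizer can be downloaded; the private
# `_get_query_embedding` is called directly for illustration.
def _sparse_embedding_demo():
    emb = DefaultPineconeSparseEmbedding()
    return emb._get_query_embedding("hybrid search hybrid")  # repeated tokens get counts > 1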
|
from collections import Counter
from packaging import version
from typing import Any, Callable, List
from llama_index.core.bridge.pydantic import Field
from llama_index.core.base.embeddings.base_sparse import (
BaseSparseEmbedding,
SparseEmbedding,
)
def get_default_tokenizer() -> Callable:
"""
Get default tokenizer.
NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
"""
from transformers import BertTokenizerFast
orig_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
def _tokenizer(texts: List[str]) -> List[List[int]]:
return orig_tokenizer(texts, padding=True, truncation=True, max_length=512)[
"input_ids"
]
return _tokenizer
class DefaultPineconeSparseEmbedding(BaseSparseEmbedding):
"""Default Pinecone sparse embedding."""
tokenizer: Callable = Field(
default_factory=get_default_tokenizer,
description="A callable that returns token input ids.",
)
def build_sparse_embeddings(
self, input_batch: List[List[int]]
) -> List[SparseEmbedding]:
"""
Build a list of sparse dictionaries from a batch of input_ids.
NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
"""
# store a batch of sparse embeddings
sparse_emb_list = []
# iterate through input batch
for token_ids in input_batch:
sparse_emb = {}
# convert the input_ids list to a dictionary of key to frequency values
d = dict(Counter(token_ids))
for idx in d:
sparse_emb[idx] = float(d[idx])
sparse_emb_list.append(sparse_emb)
# return sparse_emb list
return sparse_emb_list
def _get_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query synchronously."""
token_ids = self.tokenizer([query])[0]
return self.build_sparse_embeddings([token_ids])[0]
async def _aget_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query asynchronously."""
return self._get_query_embedding(query)
def _get_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text synchronously."""
return self._get_query_embedding(text)
async def _aget_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text asynchronously."""
return self._get_query_embedding(text)
def _import_pinecone() -> Any:
"""
    Try to import the pinecone module. If it is not installed, instruct the user how to install it.
"""
try:
import pinecone
except ImportError as e:
raise ImportError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
) from e
return pinecone
def _is_pinecone_v3() -> bool:
"""
Check whether the pinecone client is >= 3.0.0.
"""
pinecone = _import_pinecone()
pinecone_client_version = pinecone.__version__
return version.parse(pinecone_client_version) >= version.parse("3.0.0")
|
"""Test utils."""
from typing import List, Annotated
from llama_index.core.bridge.pydantic import Field
from llama_index.core.tools.utils import create_schema_from_function
def test_create_schema_from_function() -> None:
"""Test create schema from function."""
def test_fn(x: int, y: int, z: List[str]) -> None:
"""Test function."""
SchemaCls = create_schema_from_function("test_schema", test_fn)
schema = SchemaCls.model_json_schema()
assert schema["properties"]["x"]["type"] == "integer"
assert schema["properties"]["y"]["type"] == "integer"
assert schema["properties"]["z"]["type"] == "array"
assert schema["required"] == ["x", "y", "z"]
SchemaCls = create_schema_from_function("test_schema", test_fn, [("a", bool, 1)])
schema = SchemaCls.model_json_schema()
assert schema["properties"]["a"]["type"] == "boolean"
def test_fn2(x: int = 1) -> None:
"""Optional input."""
SchemaCls = create_schema_from_function("test_schema", test_fn2)
schema = SchemaCls.model_json_schema()
assert "required" not in schema
def test_create_schema_from_function_with_field() -> None:
"""Test create_schema_from_function with pydantic.Field."""
def tmp_function(x: int = Field(3, description="An integer")) -> str:
return str(x)
schema = create_schema_from_function("TestSchema", tmp_function)
actual_schema = schema.model_json_schema()
assert "x" in actual_schema["properties"]
assert actual_schema["properties"]["x"]["type"] == "integer"
assert actual_schema["properties"]["x"]["default"] == 3
assert actual_schema["properties"]["x"]["description"] == "An integer"
# Test the created schema
instance = schema()
assert instance.x == 3 # type: ignore
instance = schema(x=5)
assert instance.x == 5 # type: ignore
def test_create_schema_from_function_with_typing_annotated() -> None:
"""Test create_schema_from_function with pydantic.Field."""
def tmp_function(x: Annotated[int, "An integer"] = 3) -> str:
return str(x)
schema = create_schema_from_function("TestSchema", tmp_function)
actual_schema = schema.model_json_schema()
assert "x" in actual_schema["properties"]
assert actual_schema["properties"]["x"]["type"] == "integer"
assert actual_schema["properties"]["x"]["default"] == 3
assert actual_schema["properties"]["x"]["description"] == "An integer"
# Test the created schema
instance = schema()
assert instance.x == 3 # type: ignore
instance = schema(x=5)
assert instance.x == 5 # type: ignore
def test_create_schema_from_function_with_field_annotated() -> None:
"""Test create_schema_from_function with Annotated[pydantic.Field]."""
def tmp_function(x: Annotated[int, Field(description="An integer")] = 3) -> str:
return str(x)
schema = create_schema_from_function("TestSchema", tmp_function)
actual_schema = schema.model_json_schema()
assert "x" in actual_schema["properties"]
assert actual_schema["properties"]["x"]["type"] == "integer"
assert actual_schema["properties"]["x"]["default"] == 3
assert actual_schema["properties"]["x"]["description"] == "An integer"
# Test the created schema
instance = schema()
assert instance.x == 3 # type: ignore
instance = schema(x=5)
assert instance.x == 5 # type: ignore
|
"""Test utils."""
from typing import List, Annotated
from llama_index.core.bridge.pydantic import Field
from llama_index.core.tools.utils import create_schema_from_function
def test_create_schema_from_function() -> None:
"""Test create schema from function."""
def test_fn(x: int, y: int, z: List[str]) -> None:
"""Test function."""
SchemaCls = create_schema_from_function("test_schema", test_fn)
schema = SchemaCls.model_json_schema()
assert schema["properties"]["x"]["type"] == "integer"
assert schema["properties"]["y"]["type"] == "integer"
assert schema["properties"]["z"]["type"] == "array"
assert schema["required"] == ["x", "y", "z"]
SchemaCls = create_schema_from_function("test_schema", test_fn, [("a", bool, 1)])
schema = SchemaCls.model_json_schema()
assert schema["properties"]["a"]["type"] == "boolean"
def test_fn2(x: int = 1) -> None:
"""Optional input."""
SchemaCls = create_schema_from_function("test_schema", test_fn2)
schema = SchemaCls.model_json_schema()
assert "required" not in schema
def test_create_schema_from_function_with_field() -> None:
"""Test create_schema_from_function with pydantic.Field."""
def tmp_function(x: int = Field(3, description="An integer")) -> str:
return str(x)
schema = create_schema_from_function("TestSchema", tmp_function)
actual_schema = schema.model_json_schema()
assert "x" in actual_schema["properties"]
assert actual_schema["properties"]["x"]["type"] == "integer"
assert actual_schema["properties"]["x"]["default"] == 3
assert actual_schema["properties"]["x"]["description"] == "An integer"
# Test the created schema
instance = schema()
assert instance.x == 3 # type: ignore
instance = schema(x=5)
assert instance.x == 5 # type: ignore
def test_create_schema_from_function_with_typing_annotated() -> None:
"""Test create_schema_from_function with pydantic.Field."""
def tmp_function(x: Annotated[int, "An integer"] = 3) -> str:
return str(x)
schema = create_schema_from_function("TestSchema", tmp_function)
actual_schema = schema.model_json_schema()
assert "x" in actual_schema["properties"]
assert actual_schema["properties"]["x"]["type"] == "integer"
assert actual_schema["properties"]["x"]["default"] == 3
assert actual_schema["properties"]["x"]["description"] == "An integer"
# Test the created schema
instance = schema()
assert instance.x == 3 # type: ignore
instance = schema(x=5)
assert instance.x == 5 # type: ignore
|
import os
from itertools import cycle
from pathlib import Path
import pytest
from doc_cache import DocCache
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.parametrize('cache_fields', ['[content_hash]', '[id]'])
@pytest.mark.parametrize('value', [['a'], ['a', 'b']])
def test_cache(tmp_path, cache_fields, value):
os.environ['CACHE_FIELDS'] = cache_fields
os.environ['CACHE_WORKSPACE'] = str(tmp_path / 'cache')
docs = DocumentArray()
for _id, v in enumerate(cycle(value)):
if _id >= 2:
break
if cache_fields == '[content_hash]':
doc = Document(content=v)
elif cache_fields == '[id]':
doc = Document(id=v)
docs.append(doc)
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
response = f.post(on='/index', inputs=DocumentArray(docs), return_results=True)
assert len(response[0].docs) == len(value)
if cache_fields == '[content_hash]':
result_set = set([d.content for d in response[0].docs])
elif cache_fields == '[id]':
result_set = set([d.id for d in response[0].docs])
assert result_set == set(value)
@pytest.mark.parametrize('content', [['a'], ['a', 'b']])
def test_cache_two_fields(tmp_path, content):
os.environ['CACHE_FIELDS'] = '[id, content]'
os.environ['CACHE_WORKSPACE'] = str(tmp_path / 'cache')
docs = DocumentArray()
for _id, c in enumerate(cycle(content)):
if _id >= 3:
break
docs.append(Document(id=c, content='content'))
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
response = f.post(on='/index', inputs=docs, return_results=True)
assert len(response[0].docs) == len(set(content))
def test_default_config(tmp_path):
import shutil
shutil.rmtree(str(Path(__file__).parents[2] / 'cache'), ignore_errors=True)
docs = DocumentArray(
[
Document(id=1, content='🐯'),
Document(id=2, content='🐯'),
Document(id=3, content='🐻'),
]
)
docs_to_update = DocumentArray([Document(id=2, content='🐼')])
with Flow().add(
uses=str(Path(__file__).parents[2] / 'config.yml'),
uses_metas={'workspace': str(tmp_path / 'cache')},
) as f:
response = f.post(on='/index', inputs=docs, return_results=True)
# the duplicated Document is removed from the request
assert len(response[0].data.docs) == 2
assert set([doc.id for doc in response[0].data.docs]) == set(['1', '3'])
response = f.post(on='/update', inputs=docs_to_update, return_results=True)
# the Document with `id=2` is no longer duplicated.
assert len(response[0].data.docs) == 1
response = f.post(on='/index', inputs=docs[-1], return_results=True)
assert len(response[0].data.docs) == 0 # the Document has been cached
f.post(on='/delete', inputs=docs[-1])
response = f.post(on='/index', inputs=docs[-1], return_results=True)
# after the deletion, the Document is no longer in the cache, so it is indexed (and cached) again
assert len(response[0].data.docs) == 1
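# A hypothetical sketch of the dedup idea these tests exercise: the cache keeps
# a set keyed on the configured fields and drops Documents whose key was seen
# before. Illustration only, not the DocCache executor's actual code.
def dedup_by_fields(docs, fields):
    seen, kept = set(), []
    for doc in docs:
        key = tuple(getattr(doc, f) for f in fields)
        if key not in seen:
            seen.add(key)
            kept.append(doc)
    return kept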
|
import os
from itertools import cycle
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.parametrize('cache_fields', ['[content_hash]', '[id]'])
@pytest.mark.parametrize('value', [['a'], ['a', 'b']])
def test_cache(tmp_path, cache_fields, value):
os.environ['CACHE_FIELDS'] = cache_fields
os.environ['CACHE_WORKSPACE'] = str(tmp_path / 'cache')
docs = DocumentArray()
for _id, v in enumerate(cycle(value)):
if _id >= 2:
break
if cache_fields == '[content_hash]':
doc = Document(content=v)
elif cache_fields == '[id]':
doc = Document(id=v)
docs.append(doc)
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
response = f.post(on='/index', inputs=DocumentArray(docs), return_results=True)
assert len(response[0].docs) == len(value)
if cache_fields == '[content_hash]':
result_set = set([d.content for d in response[0].docs])
elif cache_fields == '[id]':
result_set = set([d.id for d in response[0].docs])
assert result_set == set(value)
@pytest.mark.parametrize('content', [['a'], ['a', 'b']])
def test_cache_two_fields(tmp_path, content):
os.environ['CACHE_FIELDS'] = '[id, content]'
os.environ['CACHE_WORKSPACE'] = str(tmp_path / 'cache')
docs = DocumentArray()
for _id, c in enumerate(cycle(content)):
if _id >= 3:
break
docs.append(Document(id=c, content='content'))
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
response = f.post(on='/index', inputs=docs, return_results=True)
assert len(response[0].docs) == len(set(content))
def test_default_config(tmp_path):
import shutil
shutil.rmtree(str(Path(__file__).parents[2] / 'cache'), ignore_errors=True)
docs = DocumentArray(
[
Document(id=1, content='🐯'),
Document(id=2, content='🐯'),
Document(id=3, content='🐻'),
]
)
docs_to_update = DocumentArray([Document(id=2, content='🐼')])
with Flow().add(
uses=str(Path(__file__).parents[2] / 'config.yml'),
uses_metas={'workspace': str(tmp_path / 'cache')},
) as f:
response = f.post(on='/index', inputs=docs, return_results=True)
# the duplicated Document is removed from the request
assert len(response[0].data.docs) == 2
assert set([doc.id for doc in response[0].data.docs]) == set(['1', '3'])
response = f.post(on='/update', inputs=docs_to_update, return_results=True)
# the Document with `id=2` is no longer duplicated.
assert len(response[0].data.docs) == 1
response = f.post(on='/index', inputs=docs[-1], return_results=True)
assert len(response[0].data.docs) == 0 # the Document has been cached
f.post(on='/delete', inputs=docs[-1])
response = f.post(on='/index', inputs=docs[-1], return_results=True)
# after the deletion, the Document is no longer in the cache, so it is indexed (and cached) again
assert len(response[0].data.docs) == 1
|
import enum
from typing import Any, Callable, Dict, List, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision.prototype.transforms.utils import check_type
from torchvision.utils import _log_api_usage_once
class Transform(nn.Module):
# Class attribute defining transformed types. Other types are passed through without any transformation.
# We support both types and callables that can run further checks on the input.
_transformed_types: Tuple[Union[Type, Callable[[Any], bool]], ...] = (torch.Tensor, PIL.Image.Image)
def __init__(self) -> None:
super().__init__()
_log_api_usage_once(self)
def _check_inputs(self, flat_inputs: List[Any]) -> None:
pass
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
return dict()
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
raise NotImplementedError
def forward(self, *inputs: Any) -> Any:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
self._check_inputs(flat_inputs)
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if check_type(inpt, self._transformed_types) else inpt for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
def extra_repr(self) -> str:
extra = []
for name, value in self.__dict__.items():
if name.startswith("_") or name == "training":
continue
if not isinstance(value, (bool, int, float, str, tuple, list, enum.Enum)):
continue
extra.append(f"{name}={value}")
return ", ".join(extra)
class _RandomApplyTransform(Transform):
def __init__(self, p: float = 0.5) -> None:
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
super().__init__()
self.p = p
def forward(self, *inputs: Any) -> Any:
# We need to almost duplicate `Transform.forward()` here since we always want to check the inputs, but return
# early afterwards in case the random check triggers. The same result could be achieved by calling
# `super().forward()` after the random check, but that would call `self._check_inputs` twice.
inputs = inputs if len(inputs) > 1 else inputs[0]
flat_inputs, spec = tree_flatten(inputs)
self._check_inputs(flat_inputs)
if torch.rand(1) >= self.p:
return inputs
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if check_type(inpt, self._transformed_types) else inpt for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
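# A small illustration of the pytree round-trip that `forward` relies on:
# arbitrarily nested inputs are flattened to a flat list, transformed
# element-wise, and rebuilt with the original structure.
nested = {"a": [1, 2], "b": (3,)}
flat, spec = tree_flatten(nested)  # flat == [1, 2, 3]
rebuilt = tree_unflatten([x * 10 for x in flat], spec)
assert rebuilt == {"a": [10, 20], "b": (30,)}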
|
import enum
from typing import Any, Callable, Dict, List, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision.prototype.transforms._utils import _isinstance
from torchvision.utils import _log_api_usage_once
class Transform(nn.Module):
# Class attribute defining transformed types. Other types are passed through without any transformation.
# We support both types and callables that can run further checks on the input.
_transformed_types: Tuple[Union[Type, Callable[[Any], bool]], ...] = (torch.Tensor, PIL.Image.Image)
def __init__(self) -> None:
super().__init__()
_log_api_usage_once(self)
def _check_inputs(self, flat_inputs: List[Any]) -> None:
pass
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
return dict()
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
raise NotImplementedError
def forward(self, *inputs: Any) -> Any:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
self._check_inputs(flat_inputs)
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
def extra_repr(self) -> str:
extra = []
for name, value in self.__dict__.items():
if name.startswith("_") or name == "training":
continue
if not isinstance(value, (bool, int, float, str, tuple, list, enum.Enum)):
continue
extra.append(f"{name}={value}")
return ", ".join(extra)
class _RandomApplyTransform(Transform):
def __init__(self, p: float = 0.5) -> None:
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
super().__init__()
self.p = p
def forward(self, *inputs: Any) -> Any:
# We need to almost duplicate `Transform.forward()` here since we always want to check the inputs, but return
# early afterwards in case the random check triggers. The same result could be achieved by calling
# `super().forward()` after the random check, but that would call `self._check_inputs` twice.
inputs = inputs if len(inputs) > 1 else inputs[0]
flat_inputs, spec = tree_flatten(inputs)
self._check_inputs(flat_inputs)
if torch.rand(1) >= self.p:
return inputs
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
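# A sketch of why `_transformed_types` may mix types and callables: a callable
# entry acts as a predicate, refining the check beyond a plain isinstance.
# `_matches` is a hypothetical stand-in for the imported `_isinstance` helper.
def _matches(obj, types_or_preds):
    return any(
        t(obj) if callable(t) and not isinstance(t, type) else isinstance(obj, t)
        for t in types_or_preds
    )
assert _matches(5, (int,))
assert _matches(5, (lambda o: o > 3,))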
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_steps.md']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
NOTE: copied from mktestdocs.__main__, extended to support keyword_ignore.
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
keyword_ignore: skip code blocks that contain any of these keywords
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
files_to_check = [
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
]
file_to_remove = []
for file in files_to_check:
for fn in file_to_skip:
if fn in str(file):
file_to_remove.append(file)
for file in file_to_remove:
files_to_check.remove(file)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
@pytest.mark.parametrize('fpath', files_to_check, ids=str)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True, keyword_ignore=['pickle', 'jac'])
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_readme():
check_md_file(
fpath='README.md',
memory=True,
keyword_ignore=[
'tensorflow',
'fastapi',
'push',
'langchain',
'MovieDoc',
'jina',
],
)
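# A minimal illustration of the building block above: grab_code_blocks extracts
# the fenced blocks for the requested language, which check_raw_file_full then
# concatenates and executes as one script (assuming standard fence parsing).
md = "intro\n```python\nx = 1\n```\ntext\n```python\nprint(x)\n```\n"
assert len(grab_code_blocks(md, lang="python")) == 2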
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
from tests.index.elastic.fixture import start_storage_v8 # noqa: F401
file_to_skip = ['fastAPI', 'jina', 'index', 'first_steps.md']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
NOTE: copied from mktestdocs.__main__, extended to support keyword_ignore.
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
keyword_ignore: skip code blocks that contain any of these keywords
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
files_to_check = [
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
]
file_to_remove = []
for file in files_to_check:
for fn in file_to_skip:
if fn in str(file):
file_to_remove.append(file)
for file in file_to_remove:
files_to_check.remove(file)
@pytest.mark.parametrize('fpath', files_to_check, ids=str)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True, keyword_ignore=['pickle', 'jac'])
def test_readme():
check_md_file(
fpath='README.md',
memory=True,
keyword_ignore=[
'tensorflow',
'fastapi',
'push',
'langchain',
'MovieDoc',
'jina',
],
)
|
"""Default prompt selectors."""
from llama_index.core.prompts import SelectorPromptTemplate
from llama_index.core.prompts.chat_prompts import (
CHAT_REFINE_PROMPT,
CHAT_REFINE_TABLE_CONTEXT_PROMPT,
CHAT_TEXT_QA_PROMPT,
CHAT_TREE_SUMMARIZE_PROMPT,
)
from llama_index.core.prompts.default_prompts import (
DEFAULT_REFINE_PROMPT,
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT,
DEFAULT_TEXT_QA_PROMPT,
DEFAULT_TREE_SUMMARIZE_PROMPT,
)
from llama_index.core.prompts.utils import is_chat_model
try:
from llama_index.llms.cohere import (
is_cohere_model,
COHERE_QA_TEMPLATE,
COHERE_REFINE_TEMPLATE,
COHERE_TREE_SUMMARIZE_TEMPLATE,
COHERE_REFINE_TABLE_CONTEXT_PROMPT,
) # pants: no-infer-dep
except ImportError:
COHERE_QA_TEMPLATE = None
COHERE_REFINE_TEMPLATE = None
COHERE_TREE_SUMMARIZE_TEMPLATE = None
COHERE_REFINE_TABLE_CONTEXT_PROMPT = None
# Define prompt selectors for Text QA, Tree Summarize, Refine, and Refine Table.
# Note: Cohere models accept a special `documents` argument for RAG calls. To pass retrieved documents through that argument,
# specialised templates have been defined. The conditionals below ensure that these templates are used by default whenever a
# retriever is run with a Cohere model as the generator.
# Text QA
default_text_qa_conditionals = [(is_chat_model, CHAT_TEXT_QA_PROMPT)]
if COHERE_QA_TEMPLATE is not None:
default_text_qa_conditionals = [
(is_cohere_model, COHERE_QA_TEMPLATE),
(is_chat_model, CHAT_TEXT_QA_PROMPT),
]
DEFAULT_TEXT_QA_PROMPT_SEL = SelectorPromptTemplate(
default_template=DEFAULT_TEXT_QA_PROMPT,
conditionals=default_text_qa_conditionals,
)
# Tree Summarize
default_tree_summarize_conditionals = [(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)]
if COHERE_TREE_SUMMARIZE_TEMPLATE is not None:
default_tree_summarize_conditionals = [
(is_cohere_model, COHERE_TREE_SUMMARIZE_TEMPLATE),
(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT),
]
DEFAULT_TREE_SUMMARIZE_PROMPT_SEL = SelectorPromptTemplate(
default_template=DEFAULT_TREE_SUMMARIZE_PROMPT,
conditionals=default_tree_summarize_conditionals,
)
# Refine
default_refine_conditionals = [(is_chat_model, CHAT_REFINE_PROMPT)]
if COHERE_REFINE_TEMPLATE is not None:
default_refine_conditionals = [
(is_cohere_model, COHERE_REFINE_TEMPLATE),
(is_chat_model, CHAT_REFINE_PROMPT),
]
DEFAULT_REFINE_PROMPT_SEL = SelectorPromptTemplate(
default_template=DEFAULT_REFINE_PROMPT,
conditionals=default_refine_conditionals,
)
# Refine Table Context
default_refine_table_conditionals = [(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)]
if COHERE_REFINE_TABLE_CONTEXT_PROMPT is not None:
default_refine_table_conditionals = [
(is_cohere_model, COHERE_REFINE_TABLE_CONTEXT_PROMPT),
(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT),
]
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT_SEL = SelectorPromptTemplate(
default_template=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT,
conditionals=default_refine_table_conditionals,
)
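# A sketch of the selector mechanism used above: conditionals are
# (predicate, template) pairs checked in order against the LLM; the first match
# wins, otherwise the default template is used. Plain callables stand in for
# real LLM checks here; this illustrates the idea, not SelectorPromptTemplate's code.
def select(default, conditionals, llm):
    for predicate, template in conditionals:
        if predicate(llm):
            return template
    return default
assert select("default", [(lambda llm: llm == "cohere", "cohere-tpl")], "cohere") == "cohere-tpl"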
|
"""Default prompt selectors."""
from llama_index.core.prompts import SelectorPromptTemplate
from llama_index.core.prompts.chat_prompts import (
CHAT_REFINE_PROMPT,
CHAT_REFINE_TABLE_CONTEXT_PROMPT,
CHAT_TEXT_QA_PROMPT,
CHAT_TREE_SUMMARIZE_PROMPT,
)
from llama_index.core.prompts.default_prompts import (
DEFAULT_REFINE_PROMPT,
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT,
DEFAULT_TEXT_QA_PROMPT,
DEFAULT_TREE_SUMMARIZE_PROMPT,
)
from llama_index.core.prompts.utils import is_chat_model
try:
from llama_index.llms.cohere import (
is_cohere_model,
COHERE_QA_TEMPLATE,
COHERE_REFINE_TEMPLATE,
COHERE_TREE_SUMMARIZE_TEMPLATE,
COHERE_REFINE_TABLE_CONTEXT_PROMPT,
) # pants: no-infer-dep
except ImportError:
COHERE_QA_TEMPLATE = None
COHERE_REFINE_TEMPLATE = None
COHERE_TREE_SUMMARIZE_TEMPLATE = None
COHERE_REFINE_TABLE_CONTEXT_PROMPT = None
# Define prompt selectors for Text QA, Tree Summarize, Refine, and Refine Table.
# Note: Cohere models accept a special `documents` argument for RAG calls. To pass retrieved documents through that argument,
# specialised templates have been defined. The conditionals below ensure that these templates are used by default whenever a
# retriever is run with a Cohere model as the generator.
# Text QA
default_text_qa_conditionals = [(is_chat_model, CHAT_TEXT_QA_PROMPT)]
if COHERE_QA_TEMPLATE is not None:
default_text_qa_conditionals = [
(is_cohere_model, COHERE_QA_TEMPLATE),
(is_chat_model, CHAT_TEXT_QA_PROMPT),
]
DEFAULT_TEXT_QA_PROMPT_SEL = SelectorPromptTemplate(
default_template=DEFAULT_TEXT_QA_PROMPT,
conditionals=default_text_qa_conditionals,
)
# Tree Summarize
default_tree_summarize_conditionals = [(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)]
if COHERE_TREE_SUMMARIZE_TEMPLATE is not None:
default_tree_summarize_conditionals = [
(is_cohere_model, COHERE_TREE_SUMMARIZE_TEMPLATE),
(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT),
]
DEFAULT_TREE_SUMMARIZE_PROMPT_SEL = SelectorPromptTemplate(
default_template=DEFAULT_TREE_SUMMARIZE_PROMPT,
conditionals=default_tree_summarize_conditionals,
)
# Refine
default_refine_conditionals = [(is_chat_model, CHAT_REFINE_PROMPT)]
if COHERE_REFINE_TEMPLATE is not None:
default_refine_conditionals = [
(is_cohere_model, COHERE_REFINE_TEMPLATE),
(is_chat_model, CHAT_REFINE_PROMPT),
]
DEFAULT_REFINE_PROMPT_SEL = SelectorPromptTemplate(
default_template=DEFAULT_REFINE_PROMPT,
conditionals=default_refine_conditionals,
)
# Refine Table Context
default_refine_table_conditionals = [(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)]
if COHERE_REFINE_TABLE_CONTEXT_PROMPT is not None:
default_refine_table_conditionals = [
(is_cohere_model, COHERE_REFINE_TABLE_CONTEXT_PROMPT),
(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT),
]
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT_SEL = SelectorPromptTemplate(
default_template=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT,
conditionals=default_refine_table_conditionals,
)
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.ports[0]
self.grpc_port = self.ports[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
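# A hedged client-side sketch for probing the gRPC half of this gateway via the
# standard health-checking service registered above. The address is a placeholder.
def check_health(address="localhost:12346"):
    with grpc.insecure_channel(address) as channel:
        stub = health_pb2_grpc.HealthStub(channel)
        resp = stub.Check(health_pb2.HealthCheckRequest(service=""))
        return resp.status == health_pb2.HealthCheckResponse.SERVING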
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.runtime_args.port[0]
self.grpc_port = self.runtime_args.port[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.0", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.0", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("5.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
import json
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetMessageSchema(BaseModel):
"""Input schema for SlackGetMessages."""
channel_id: str = Field(
...,
description="The channel id, private group, or IM channel to send message to.",
)
class SlackGetMessage(SlackBaseTool):
"""Tool that gets Slack messages."""
name: str = "get_messages"
description: str = "Use this tool to get messages from a channel."
args_schema: Type[SlackGetMessageSchema] = SlackGetMessageSchema
def _run(
self,
channel_id: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
logging.getLogger(__name__)
try:
result = self.client.conversations_history(channel=channel_id)
messages = result["messages"]
filtered_messages = [
{key: message[key] for key in ("user", "text", "ts")}
for message in messages
if "user" in message and "text" in message and "ts" in message
]
return json.dumps(filtered_messages, ensure_ascii=False)
except Exception as e:
return "Error creating conversation: {}".format(e)
|
import json
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetMessageSchema(BaseModel):
"""Input schema for SlackGetMessages."""
channel_id: str = Field(
...,
description="The channel id, private group, or IM channel to send message to.",
)
class SlackGetMessage(SlackBaseTool):  # type: ignore[override]
"""Tool that gets Slack messages."""
name: str = "get_messages"
description: str = "Use this tool to get messages from a channel."
args_schema: Type[SlackGetMessageSchema] = SlackGetMessageSchema
def _run(
self,
channel_id: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
logging.getLogger(__name__)
try:
result = self.client.conversations_history(channel=channel_id)
messages = result["messages"]
filtered_messages = [
{key: message[key] for key in ("user", "text", "ts")}
for message in messages
if "user" in message and "text" in message and "ts" in message
]
return json.dumps(filtered_messages, ensure_ascii=False)
except Exception as e:
return "Error creating conversation: {}".format(e)
|
from dataclasses import dataclass
DEFAULT_USER_ID = "3e53486c-cf57-477e-ba2a-cb02dc828e1a"
DEFAULT_EMAIL = "default@example.com"
# Using dataclass here to avoid adding dependency on pydantic
@dataclass(frozen=True)
class User:
user_id: str
email: str
phone_number: str
role: str
@classmethod
def from_payload(cls, payload):
return cls(
user_id=payload["sub"],
email=payload.get("email", ""),
phone_number=payload.get("phone", ""),
role=payload["role"],
)
|
from dataclasses import dataclass
# Using dataclass here to avoid adding dependency on pydantic
@dataclass(frozen=True)
class User:
user_id: str
email: str
phone_number: str
role: str
@classmethod
def from_payload(cls, payload):
return cls(
user_id=payload["sub"],
email=payload.get("email", ""),
phone_number=payload.get("phone", ""),
role=payload["role"],
)
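# A minimal usage sketch: building a User from a decoded JWT-style payload.
# The payload values below are made up for illustration.
payload = {"sub": "user-123", "email": "a@example.com", "role": "admin"}
user = User.from_payload(payload)
assert user.phone_number == ""  # a missing "phone" claim falls back to ""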
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
@pytest.mark.skipif(
not backend.backend() == "tensorflow", reason="Requires tf.SparseTensor"
)
def test_sparse_inputs(self):
import tensorflow as tf
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = tf.SparseTensor(
indices=[[0, 0], [1, 1], [2, 2]],
values=["b", "c", "d"],
dense_shape=(3, 3),
)
output = layer(input_data)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllClose(output, np.array([[2, 0, 0], [0, 3, 0], [0, 0, 0]]))
self.assertAllClose(output.values, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
|
import numpy as np
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class StringLookupTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
oov_token="[OOV]",
mask_token="[MASK]",
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.adapt(["a", "a", "a", "b", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_fixed_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_set_vocabulary(self):
layer = layers.StringLookup(
output_mode="int",
)
layer.set_vocabulary(["a", "b", "c"])
input_data = ["b", "c", "d"]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([2, 3, 0]))
def test_tf_data_compatibility(self):
layer = layers.StringLookup(
output_mode="int",
vocabulary=["a", "b", "c"],
)
input_data = ["b", "c", "d"]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([2, 3, 0]))
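# Why the expected outputs are [2, 3, 0]: with the default num_oov_indices=1,
# index 0 is reserved for out-of-vocabulary tokens, so the effective vocabulary
# is ["[UNK]", "a", "b", "c"] and the unseen "d" maps to 0. A quick check:
layer = layers.StringLookup(vocabulary=["a", "b", "c"])
assert layer.get_vocabulary() == ["[UNK]", "a", "b", "c"]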
|
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / "linux-x64" / "native"
linux_folder_path.mkdir(parents=True, exist_ok=True)
osx_folder_path = current_dir / "runtimes" / "osx-x64" / "native"
osx_folder_path.mkdir(parents=True, exist_ok=True)
windows_folder_path = current_dir / "runtimes" / "win-x64" / "native"
windows_folder_path.mkdir(parents=True, exist_ok=True)
build_folder_path = current_dir / "build"
build_folder_path.mkdir(parents=True, exist_ok=True)
copyfile(source / "lib_lightgbm.so", linux_folder_path / "lib_lightgbm.so")
copyfile(source / "lib_lightgbm.dylib", osx_folder_path / "lib_lightgbm.dylib")
copyfile(source / "lib_lightgbm.dll", windows_folder_path / "lib_lightgbm.dll")
copyfile(source / "lightgbm.exe", windows_folder_path / "lightgbm.exe")
version = (current_dir.parent / "VERSION.txt").read_text(encoding="utf-8").strip().replace("rc", "-rc")
nuget_str = rf"""<?xml version="1.0"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>LightGBM</id>
<version>{version}</version>
<authors>Guolin Ke</authors>
<owners>Guolin Ke</owners>
<license type="expression">MIT</license>
<projectUrl>https://github.com/microsoft/LightGBM</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A fast, distributed, high performance gradient boosting framework</description>
<copyright>Copyright {datetime.datetime.now().year} @ Microsoft</copyright>
<tags>machine-learning data-mining distributed native boosting gbdt</tags>
<dependencies> </dependencies>
</metadata>
<files>
<file src="build\**" target="build"/>
<file src="runtimes\**" target="runtimes"/>
</files>
</package>
"""
prop_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Condition="Exists('packages.config') OR
Exists('$(MSBuildProjectName).packages.config') OR
Exists('packages.$(MSBuildProjectName).config')">
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.dll"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.exe"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
</ItemGroup>
</Project>
"""
target_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
</PropertyGroup>
<Target Name="_LightGBMCheckForUnsupportedPlatformTarget"
Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == 'true'"
AfterTargets="_CheckForInvalidConfigurationAndPlatform">
<Error Condition="'$(PlatformTarget)' != 'x64' AND
('$(OutputType)' == 'Exe' OR '$(OutputType)'=='WinExe') AND
!('$(TargetFrameworkIdentifier)' == '.NETCoreApp' AND '$(PlatformTarget)' == '')"
Text="LightGBM currently supports 'x64' processor architectures. Please ensure your application is targeting 'x64'." />
</Target>
</Project>
"""
(current_dir / "LightGBM.nuspec").write_text(nuget_str, encoding="utf-8")
(current_dir / "build" / "LightGBM.props").write_text(prop_str, encoding="utf-8")
(current_dir / "build" / "LightGBM.targets").write_text(target_str, encoding="utf-8")
|
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / "linux-x64" / "native"
linux_folder_path.mkdir(parents=True, exist_ok=True)
osx_folder_path = current_dir / "runtimes" / "osx-x64" / "native"
osx_folder_path.mkdir(parents=True, exist_ok=True)
windows_folder_path = current_dir / "runtimes" / "win-x64" / "native"
windows_folder_path.mkdir(parents=True, exist_ok=True)
build_folder_path = current_dir / "build"
build_folder_path.mkdir(parents=True, exist_ok=True)
copyfile(source / "lib_lightgbm.so", linux_folder_path / "lib_lightgbm.so")
copyfile(source / "lib_lightgbm.dylib", osx_folder_path / "lib_lightgbm.dylib")
copyfile(source / "lib_lightgbm.dll", windows_folder_path / "lib_lightgbm.dll")
copyfile(source / "lightgbm.exe", windows_folder_path / "lightgbm.exe")
version = (current_dir.parent / "VERSION.txt").read_text(encoding="utf-8").strip().replace("rc", "-rc")
nuget_str = rf"""<?xml version="1.0"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>LightGBM</id>
<version>{version}</version>
<authors>Guolin Ke</authors>
<owners>Guolin Ke</owners>
<license type="expression">MIT</license>
<projectUrl>https://github.com/microsoft/LightGBM</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A fast, distributed, high performance gradient boosting framework</description>
<copyright>Copyright {datetime.datetime.now().year} @ Microsoft</copyright>
<tags>machine-learning data-mining distributed native boosting gbdt</tags>
<dependencies> </dependencies>
</metadata>
<files>
<file src="build\**" target="build"/>
<file src="runtimes\**" target="runtimes"/>
</files>
</package>
"""
prop_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Condition="Exists('packages.config') OR
Exists('$(MSBuildProjectName).packages.config') OR
Exists('packages.$(MSBuildProjectName).config')">
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.dll"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.exe"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
</ItemGroup>
</Project>
"""
target_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
</PropertyGroup>
<Target Name="_LightGBMCheckForUnsupportedPlatformTarget"
Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == 'true'"
AfterTargets="_CheckForInvalidConfigurationAndPlatform">
<Error Condition="'$(PlatformTarget)' != 'x64' AND
('$(OutputType)' == 'Exe' OR '$(OutputType)'=='WinExe') AND
!('$(TargetFrameworkIdentifier)' == '.NETCoreApp' AND '$(PlatformTarget)' == '')"
Text="LightGBM currently supports 'x64' processor architectures. Please ensure your application is targeting 'x64'." />
</Target>
</Project>
"""
(current_dir / "LightGBM.nuspec").write_text(nuget_str, encoding="utf-8")
(current_dir / "build" / "LightGBM.props").write_text(prop_str, encoding="utf-8")
(current_dir / "build" / "LightGBM.targets").write_text(target_str, encoding="utf-8")
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_800mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
neck=dict(
type='FPN',
in_channels=[64, 128, 288, 672],
out_channels=256,
num_outs=5))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_800mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
neck=dict(
type='FPN',
in_channels=[64, 128, 288, 672],
out_channels=256,
num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
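# A sketch of the `_delete_` semantics in mmcv-style configs: with
# `_delete_=True` the child dict replaces the base dict wholesale instead of
# being merged key-by-key. Illustration only, not mmcv's actual merge code.
def merge(base, child):
    child = dict(child)
    if child.pop('_delete_', False):
        return child
    return {**base, **child}
assert merge(dict(type='ResNet', depth=50),
             dict(_delete_=True, type='RegNet')) == dict(type='RegNet')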
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while maintaining nearly the same performance.
For dimensionality reduction, we compute embeddings for a large set of (representative) sentences. Then,
we use PCA to find e.g. 128 principal components of our vector space. This allows us to retain
as much information as possible with only 128 dimensions.
PCA gives us a matrix that down-projects vectors to 128 dimensions. We use this matrix
and extend our original SentenceTransformer model with this linear down-projection. Hence,
the new SentenceTransformer model will directly produce embeddings with 128 dimensions,
with no further changes needed.
"""
from sklearn.decomposition import PCA
from sentence_transformers import SentenceTransformer, LoggingHandler, util, evaluation, models, InputExample
import logging
import os
import gzip
import csv
import random
import numpy as np
import torch
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Model for which we apply dimensionality reduction
model = SentenceTransformer("all-MiniLM-L6-v2")
# New size for the embeddings
new_dimension = 128
# We use AllNLI as a source of sentences to compute PCA
nli_dataset_path = "datasets/AllNLI.tsv.gz"
# We use the STS benchmark dataset to see how much performance we lose by using the dimensionality reduction
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# We measure the performance of the original model
# and later we will measure the performance with the reduced dimension size
logger.info("Read STSbenchmark test dataset")
eval_examples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "test":
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
eval_examples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
# Evaluate the original model on the STS benchmark dataset
stsb_evaluator = evaluation.EmbeddingSimilarityEvaluator.from_input_examples(eval_examples, name="sts-benchmark-test")
logger.info("Original model performance:")
stsb_evaluator(model)
######## Reduce the embedding dimensions ########
# Read sentences from NLI dataset
nli_sentences = set()
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
nli_sentences.add(row["sentence1"])
nli_sentences.add(row["sentence2"])
nli_sentences = list(nli_sentences)
random.shuffle(nli_sentences)
# To determine the PCA matrix, we need some example sentence embeddings.
# Here, we compute the embeddings for 20k random sentences from the AllNLI dataset
pca_train_sentences = nli_sentences[0:20000]
train_embeddings = model.encode(pca_train_sentences, convert_to_numpy=True)
# Compute PCA on the train embeddings matrix
pca = PCA(n_components=new_dimension)
pca.fit(train_embeddings)
pca_comp = np.asarray(pca.components_)
# We add a dense layer to the model, so that it directly produces embeddings with the new size
dense = models.Dense(
in_features=model.get_sentence_embedding_dimension(),
out_features=new_dimension,
bias=False,
activation_function=torch.nn.Identity(),
)
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca_comp))
model.add_module("dense", dense)
# Evaluate the model with the reduced embedding size
logger.info("Model with {} dimensions:".format(new_dimension))
stsb_evaluator(model)
# If you like, you can store the model on disk by uncommenting the following line
# model.save('models/my-128dim-model')
# You can then load the adapted model that produces 128 dimensional embeddings like this:
# model = SentenceTransformer('models/my-128dim-model')
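# Why assigning pca.components_ as the Dense weight performs the projection:
# sklearn's PCA.transform computes (x - mean_) @ components_.T, and a bias-free
# torch Linear computes x @ weight.T, so the weight rows are exactly the
# principal components (the mean shift is dropped here, as in the script above).
# A tiny self-check of that identity:
_X = np.random.RandomState(0).randn(100, 16)
_pca = PCA(n_components=4).fit(_X)
_proj = _X @ _pca.components_.T  # what the bias-free Dense layer computes
assert np.allclose(_proj - _proj.mean(0), _pca.transform(_X), atol=1e-6)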
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while maintaining nearly the same performance.
For dimensionality reduction, we compute embeddings for a large set of (representative) sentences. Then,
we use PCA to find e.g. 128 principal components of our vector space. This allows us to retain
as much information as possible with only 128 dimensions.
PCA gives us a matrix that down-projects vectors to 128 dimensions. We use this matrix
and extend our original SentenceTransformer model with this linear down-projection. Hence,
the new SentenceTransformer model will directly produce embeddings with 128 dimensions,
with no further changes needed.
"""
from sklearn.decomposition import PCA
from sentence_transformers import SentenceTransformer, LoggingHandler, util, evaluation, models, InputExample
import logging
import os
import gzip
import csv
import random
import numpy as np
import torch
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Model for which we apply dimensionality reduction
model = SentenceTransformer("all-MiniLM-L6-v2")
# New size for the embeddings
new_dimension = 128
# We use AllNLI as a source of sentences to compute PCA
nli_dataset_path = "datasets/AllNLI.tsv.gz"
# We use the STS benchmark dataset to see how much performance we lose by using the dimensionality reduction
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# We measure the performance of the original model
# and later we will measure the performance with the reduced dimension size
logger.info("Read STSbenchmark test dataset")
eval_examples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "test":
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
eval_examples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
# Evaluate the original model on the STS benchmark dataset
stsb_evaluator = evaluation.EmbeddingSimilarityEvaluator.from_input_examples(eval_examples, name="sts-benchmark-test")
logger.info("Original model performance:")
stsb_evaluator(model)
######## Reduce the embedding dimensions ########
# Read sentences from NLI dataset
nli_sentences = set()
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
nli_sentences.add(row["sentence1"])
nli_sentences.add(row["sentence2"])
nli_sentences = list(nli_sentences)
random.shuffle(nli_sentences)
# To determine the PCA matrix, we need some example sentence embeddings.
# Here, we compute the embeddings for 20k random sentences from the AllNLI dataset
pca_train_sentences = nli_sentences[0:20000]
train_embeddings = model.encode(pca_train_sentences, convert_to_numpy=True)
# Compute PCA on the train embeddings matrix
pca = PCA(n_components=new_dimension)
pca.fit(train_embeddings)
pca_comp = np.asarray(pca.components_)
# We add a dense layer to the model, so that it directly produces embeddings with the new size
dense = models.Dense(
in_features=model.get_sentence_embedding_dimension(),
out_features=new_dimension,
bias=False,
activation_function=torch.nn.Identity(),
)
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca_comp))
model.add_module("dense", dense)
# Evaluate the model with the reduced embedding size
logger.info("Model with {} dimensions:".format(new_dimension))
stsb_evaluator(model)
# If you like, you can store the model on disk by uncommenting the following line
# model.save('models/my-128dim-model')
# You can then load the adapted model that produces 128 dimensional embeddings like this:
# model = SentenceTransformer('models/my-128dim-model')
|
import torch
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
def wrap(wrappee, *, like, **kwargs):
"""Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.datapoint.Datapoint` subclass as ``like``.
If ``like`` is a :class:`~torchvision.datapoint.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (Datapoint): The reference. ``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.datapoint.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
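# A minimal usage sketch of `wrap`: an op result that lost its datapoint
# subclass can be re-wrapped from a reference instance. The shape is arbitrary.
_img = Image(torch.rand(3, 8, 8))
_plain = _img.as_subclass(torch.Tensor)  # plain Tensor, subclass dropped
assert isinstance(wrap(_plain, like=_img), Image)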
|
import torch
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.saving_api import load_model
from keras.src.saving.saving_api import load_weights
from keras.src.saving.saving_api import save_model
from keras.src.saving.saving_api import save_weights
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.saving_api import load_model
from keras.src.saving.saving_api import load_weights
from keras.src.saving.saving_api import save_model
from keras.src.saving.saving_api import save_weights
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
|
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url, verify_str_arg
from .vision import VisionDataset
class SVHN(VisionDataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset.
    Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
    we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions, which
    expect the class labels to be in the range `[0, C-1]`.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset where the data is stored.
split (string): One of {'train', 'test', 'extra'}.
            The dataset is selected accordingly; 'extra' is the extra training set.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
            puts it in the root directory. If the dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat",
"e26dedcc434d2e4c54c9b2d4a06d8373",
],
"test": [
"http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat",
"eb5a983be6a315427106f1b164d9cef3",
],
"extra": [
"http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat",
"a93ce644f1a588dc4d68dda5feec44a7",
],
}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
        # reading (loading) the .mat file as an array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat["X"]
# loading from the .mat file gives an np.ndarray of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.labels = loaded_mat["y"].astype(np.int64).squeeze()
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.labels, self.labels == 10, 0)
self.data = np.transpose(self.data, (3, 2, 0, 1))
def __getitem__(self, index: int) -> tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self) -> None:
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self) -> str:
return "Split: {split}".format(**self.__dict__)
|
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url, verify_str_arg
from .vision import VisionDataset
class SVHN(VisionDataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset.
    Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
    we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions, which
    expect the class labels to be in the range `[0, C-1]`.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset where the data is stored.
split (string): One of {'train', 'test', 'extra'}.
            The dataset is selected accordingly; 'extra' is the extra training set.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
            puts it in the root directory. If the dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat",
"e26dedcc434d2e4c54c9b2d4a06d8373",
],
"test": [
"http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat",
"eb5a983be6a315427106f1b164d9cef3",
],
"extra": [
"http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat",
"a93ce644f1a588dc4d68dda5feec44a7",
],
}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
        # reading (loading) the .mat file as an array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat["X"]
# loading from the .mat file gives an np.ndarray of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.labels = loaded_mat["y"].astype(np.int64).squeeze()
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.labels, self.labels == 10, 0)
self.data = np.transpose(self.data, (3, 2, 0, 1))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self) -> None:
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self) -> str:
return "Split: {split}".format(**self.__dict__)
|
import click
from .bump import bump
from .cmd_exec import cmd_exec
from .info import info
@click.group(short_help="Manage packages in the monorepo")
def pkg():
pass # pragma: no cover
pkg.add_command(info)
pkg.add_command(cmd_exec, name="exec")
pkg.add_command(bump)
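# Editor's sketch of exercising the group above in-process with click's test
# runner; invoking with --help simply lists the registered subcommands.
if __name__ == "__main__":
    from click.testing import CliRunner

    result = CliRunner().invoke(pkg, ["--help"])
    print(result.output)  # shows bump, exec and info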
|
import click
from .cmd_exec import cmd_exec
from .info import info
@click.group(short_help="Manage packages in the monorepo")
def pkg():
pass # pragma: no cover
pkg.add_command(info)
pkg.add_command(cmd_exec, name="exec")
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='The display interval in seconds; 0 means blocking')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
data['inputs'] = [data['inputs']]
data['data_samples'] = [data['data_samples']]
data_sample = model.data_preprocessor(data, False)['data_samples']
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1)).cuda()
data = {'inputs': [frame_resize], 'data_samples': [data_sample]}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
    # the dataset_meta is loaded from the checkpoint and
    # then passed to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step(data)[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
        help='The display interval in seconds; 0 means blocking')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
_, data_sample = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
data = {'inputs': frame_resize, 'data_sample': data_sample}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
    # the dataset_meta is loaded from the checkpoint and
    # then passed to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray import Document
from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(data: Dict = {}, root_node: Optional[LookupNode] = None):
if isinstance(data, dict):
for key, value in data.items():
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[key])
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet, please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[op])
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}__{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Dict = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: 'Document'):
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: 'Document'):
return self.evaluate(doc)
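# Editor's usage sketch: a Mongo-style filter mapped onto the lookup tree that
# `_parse_lookups` builds. The Document construction below is an assumption
# about docarray's API; the filter follows the operators defined above.
if __name__ == "__main__":
    from docarray import Document

    parser = QueryParser({'$and': [{'tags__x': {'$gte': 1}}, {'text': {'$eq': 'hello'}}]})
    doc = Document(text='hello', tags={'x': 2})
    print(parser.evaluate(doc))  # True: both branches of the $and match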
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray import Document
from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(data: Dict = {}, root_node: Optional[LookupNode] = None):
if isinstance(data, dict):
for key, value in data.items():
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[key])
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet, please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[op])
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}__{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: {data}')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Dict = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: 'Document'):
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: 'Document'):
return self.evaluate(doc)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset',
'CocoDataset',
'DeepFashionDataset',
'VOCDataset',
'CityscapesDataset',
'LVISDataset',
'LVISV05Dataset',
'LVISV1Dataset',
'WIDERFaceDataset',
'get_loading_pipeline',
'CocoPanopticDataset',
'MultiImageMixDataset',
'OpenImagesDataset',
'OpenImagesChallengeDataset',
'AspectRatioBatchSampler',
'ClassAwareSampler',
'MultiSourceSampler',
'GroupMultiSourceSampler',
'BaseDetDataset',
'CrowdHumanDataset',
'Objects365V1Dataset',
'Objects365V2Dataset',
'DSDLDetDataset',
'BaseVideoDataset',
'MOTChallengeDataset',
'TrackImgSampler',
'ReIDDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset',
'CocoDataset',
'DeepFashionDataset',
'VOCDataset',
'CityscapesDataset',
'LVISDataset',
'LVISV05Dataset',
'LVISV1Dataset',
'WIDERFaceDataset',
'get_loading_pipeline',
'CocoPanopticDataset',
'MultiImageMixDataset',
'OpenImagesDataset',
'OpenImagesChallengeDataset',
'AspectRatioBatchSampler',
'ClassAwareSampler',
'MultiSourceSampler',
'GroupMultiSourceSampler',
'BaseDetDataset',
'CrowdHumanDataset',
'Objects365V1Dataset',
'Objects365V2Dataset',
'DSDLDetDataset',
'BaseVideoDataset',
'MOTChallengeDataset',
'TrackImgSampler'
]
|
"""
Checkpoint functionality for machine learning models.
This module provides classes for saving and loading model checkpoints in a distributed
training environment. It includes functionality for coordinating checkpoint operations
across multiple processes and customizing the checkpoint process through hooks.
Key components:
- Checkpointer: Main class for orchestrating checkpoint operations (save, load)
- CheckpointWriter: Handles writing state dictionaries to storage
- CheckpointReader: Handles reading state dictionaries from storage
- Barrier: Synchronization mechanism for distributed checkpointing
- RankInfo: Information about the current rank in a distributed environment
"""
from .barriers import (
Barrier,
BarrierConfig,
create_barrier_from_config,
TCPStoreBarrier,
)
from .checkpoint_reader import CheckpointReader
from .checkpoint_writer import CheckpointWriter, CheckpointWriterConfig, WriterHook
from .types import RankInfo, STATE_DICT
__all__ = [
"Barrier",
"TCPStoreBarrier",
"CheckpointReader",
"CheckpointWriter",
"CheckpointWriterConfig",
"WriterHook",
"BarrierConfig",
"create_barrier_from_config",
"RankInfo",
"STATE_DICT",
]
|
"""
Checkpoint functionality for machine learning models.
This module provides classes for saving and loading model checkpoints in a distributed
training environment. It includes functionality for coordinating checkpoint operations
across multiple processes and customizing the checkpoint process through hooks.
Key components:
- Checkpointer: Main class for orchestrating checkpoint operations (save, load)
- CheckpointWriter: Handles writing state dictionaries to storage
- CheckpointReader: Handles reading state dictionaries from storage
- Barrier: Synchronization mechanism for distributed checkpointing
- RankInfo: Information about the current rank in a distributed environment
"""
from .barriers import Barrier, TCPStoreBarrier
from .checkpoint_reader import CheckpointReader
from .checkpoint_writer import CheckpointWriter, CheckpointWriterOptions, WriterHook
from .checkpointer import Checkpointer
from .types import RankInfo, STATE_DICT
__all__ = [
"Barrier",
"TCPStoreBarrier",
"CheckpointReader",
"CheckpointWriter",
"CheckpointWriterOptions",
"WriterHook",
"Checkpointer",
"RankInfo",
"STATE_DICT",
]
|
_base_ = [
'./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501
]
dataset_type = 'MOTChallengeDataset'
detector = _base_.model
detector.pop('data_preprocessor')
del _base_.model
model = dict(
type='StrongSORT',
data_preprocessor=dict(
type='TrackDataPreprocessor',
pad_size_divisor=32,
batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(576, 1024),
size_divisor=32,
interval=10)
]),
detector=detector,
reid=dict(
type='BaseReID',
data_preprocessor=dict(type='mmpretrain.ClsDataPreprocessor'),
backbone=dict(
type='mmpretrain.ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=2048,
fc_channels=1024,
out_channels=128,
num_classes=380,
loss_cls=dict(type='mmpretrain.CrossEntropyLoss', loss_weight=1.0),
loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
cmc=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='StrongSORTTracker',
motion=dict(type='KalmanFilter', center_only=False, use_nsa=True),
obj_score_thr=0.6,
reid=dict(
num_samples=None,
img_scale=(256, 128),
img_norm_cfg=dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
match_score_thr=0.3,
motion_weight=0.02,
),
match_iou_thr=0.7,
momentums=dict(embeds=0.1, ),
num_tentatives=2,
num_frames_retain=100),
postprocess_model=dict(
type='AppearanceFreeLink',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth', # noqa: E501
temporal_threshold=(0, 30),
spatial_threshold=50,
confidence_threshold=0.95,
))
train_pipeline = None
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='Resize', scale=_base_.img_scale, keep_ratio=True),
dict(
type='Pad',
size_divisor=32,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadTrackAnnotations'),
]),
dict(type='PackTrackInputs')
]
train_dataloader = None
val_dataloader = dict(
    # Now StrongSORT only supports video-based sampling
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=_base_.data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img_path='train'),
# when you evaluate track performance, you need to remove metainfo
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
train_cfg = None
optim_wrapper = None
# evaluator
val_evaluator = dict(
_delete_=True,
type='MOTChallengeMetric',
metric=['HOTA', 'CLEAR', 'Identity'],
# use_postprocess to support AppearanceFreeLink in val_evaluator
use_postprocess=True,
postprocess_tracklet_cfg=[
dict(
type='InterpolateTracklets',
min_num_frames=5,
max_num_frames=20,
use_gsi=True,
smooth_tau=10)
])
test_evaluator = val_evaluator
default_hooks = dict(logger=dict(type='LoggerHook', interval=1))
del _base_.param_scheduler
del _base_.custom_hooks
|
_base_ = [
'./yolox_x_8xb4-80e_crowdhuman-mot17halftrain_test-mot17halfval.py', # noqa: E501
]
dataset_type = 'MOTChallengeDataset'
detector = _base_.model
detector.pop('data_preprocessor')
del _base_.model
model = dict(
type='StrongSORT',
data_preprocessor=dict(
type='TrackDataPreprocessor',
pad_size_divisor=32,
batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(576, 1024),
size_divisor=32,
interval=10)
]),
detector=detector,
reid=dict(
type='BaseReID',
data_preprocessor=None,
backbone=dict(
type='mmcls.ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=2048,
fc_channels=1024,
out_channels=128,
num_classes=380,
loss_cls=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
cmc=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='StrongSORTTracker',
motion=dict(type='KalmanFilter', center_only=False, use_nsa=True),
obj_score_thr=0.6,
reid=dict(
num_samples=None,
img_scale=(256, 128),
img_norm_cfg=dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
match_score_thr=0.3,
motion_weight=0.02,
),
match_iou_thr=0.7,
momentums=dict(embeds=0.1, ),
num_tentatives=2,
num_frames_retain=100),
postprocess_model=dict(
type='AppearanceFreeLink',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/strongsort/mot_dataset/aflink_motchallenge_20220812_190310-a7578ad3.pth', # noqa: E501
temporal_threshold=(0, 30),
spatial_threshold=50,
confidence_threshold=0.95,
))
train_pipeline = None
test_pipeline = [
dict(
type='TransformBroadcaster',
transforms=[
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='Resize', scale=_base_.img_scale, keep_ratio=True),
dict(
type='Pad',
size_divisor=32,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='LoadTrackAnnotations'),
]),
dict(type='PackTrackInputs')
]
train_dataloader = None
val_dataloader = dict(
    # Now StrongSORT only supports video-based sampling
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=_base_.data_root,
ann_file='annotations/half-val_cocoformat.json',
data_prefix=dict(img_path='train'),
# when you evaluate track performance, you need to remove metainfo
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
train_cfg = None
optim_wrapper = None
# evaluator
val_evaluator = dict(
_delete_=True,
type='MOTChallengeMetric',
metric=['HOTA', 'CLEAR', 'Identity'],
# use_postprocess to support AppearanceFreeLink in val_evaluator
use_postprocess=True,
postprocess_tracklet_cfg=[
dict(
type='InterpolateTracklets',
min_num_frames=5,
max_num_frames=20,
use_gsi=True,
smooth_tau=10)
])
test_evaluator = val_evaluator
default_hooks = dict(logger=dict(type='LoggerHook', interval=1))
del _base_.param_scheduler
del _base_.custom_hooks
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
rng = da.random.default_rng(1)
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
    # DaskDMatrix acts like a normal DMatrix, working as a proxy for local
    # DMatrix objects scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
    # Use the train method from xgboost.dask instead of xgboost. This
    # distributed version of train returns a dictionary containing the
    # resulting booster and the evaluation history obtained from the
    # evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
print("Error:", da.sqrt((prediction - y) ** 2).mean().compute())
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
rng = da.random.default_rng(1)
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
    # DaskDMatrix acts like a normal DMatrix, working as a proxy for local
    # DMatrix objects scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
    # Use the train method from xgboost.dask instead of xgboost. This
    # distributed version of train returns a dictionary containing the
    # resulting booster and the evaluation history obtained from the
    # evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
"""Test the TextEmbed class."""
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.textembed import TextEmbedEmbedding
def test_textembed_class():
"""Check if BaseEmbedding is one of the base classes of TextEmbedEmbedding."""
assert issubclass(TextEmbedEmbedding, BaseEmbedding), (
"TextEmbedEmbedding does not inherit from BaseEmbedding"
)
if __name__ == "__main__":
test_textembed_class()
print("All tests passed.")
|
"""Test the TextEmbed class."""
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.textembed import TextEmbedEmbedding
def test_textembed_class():
"""Check if BaseEmbedding is one of the base classes of TextEmbedEmbedding."""
assert issubclass(
TextEmbedEmbedding, BaseEmbedding
), "TextEmbedEmbedding does not inherit from BaseEmbedding"
if __name__ == "__main__":
test_textembed_class()
print("All tests passed.")
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The main toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
    # Specify the language of the shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMDetection'
copyright = '2018-2021, OpenMMLab'
author = 'MMDetection Authors'
version_file = '../../mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'myst_parser',
'sphinx_markdown_tables',
'sphinx_copybutton',
]
myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3
autodoc_mock_imports = [
'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
},
],
    # Specify the language of the shared menu
'menu_lang':
'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
language = 'zh_CN'
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
subprocess.run(['./stat.py'])
def setup(app):
app.connect('builder-inited', builder_inited_handler)
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
    loss = SpladeLoss(
        model=model,
        main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
        lambda_query=lambda_query,  # Weight for query loss
        lambda_corpus=lambda_corpus,  # Weight for document loss
    )
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
learning_rate=4e-5,
run_name=run_name,
seed=42,
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(f"sparse-embedding/{run_name}", private=True)
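# Editor's sketch of loading the saved checkpoint for retrieval-style scoring
# (assuming training above completed); `encode` and `similarity` follow the
# usual SentenceTransformers-style SparseEncoder interface.
def score_example(run_name: str) -> None:
    model = SparseEncoder(f"runs/{run_name}/final")
    queries = model.encode(["who wrote the iliad"])
    docs = model.encode(["The Iliad is traditionally attributed to Homer."])
    print(model.similarity(queries, docs))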
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers import SimilarityFunction
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SpladePooling,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "answerdotai/ModernBERT-base"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
similarity_fn_name=SimilarityFunction.DOT_PRODUCT,
)
model.eval()
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
    loss = SpladeLoss(
        model=model,
        main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
        lambda_query=0.1,  # Weight for query loss
        lambda_corpus=0.08,  # Weight for document loss
    )
run_name = "splade-ModernBERT-nq-fresh"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=8,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=2400,
save_strategy="steps",
save_steps=2400,
learning_rate=5e-6,
optim="adamw_torch",
run_name=run_name,
lr_scheduler_type="cosine",
warmup_steps=0,
lr_scheduler_kwargs={
"num_cycles": 0.5,
},
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
        are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../../examples/sentence_transformer/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
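# A minimal standalone sketch (not part of the original file; the toy numbers
# are arbitrary) showing how the hard-pair mining in forward() behaves:
# negatives survive only if they are closer than the farthest positive, and
# positives only if they are farther than the closest negative.
# (The mean() fallbacks in forward() only matter for degenerate batches.)
import torch
toy_distances = torch.tensor([0.8, 0.2, 0.3, 0.9])
toy_labels = torch.tensor([1, 0, 1, 0])
toy_negs = toy_distances[toy_labels == 0]  # tensor([0.2000, 0.9000])
toy_poss = toy_distances[toy_labels == 1]  # tensor([0.8000, 0.3000])
hard_negs = toy_negs[toy_negs < toy_poss.max()]  # tensor([0.2000]): closer than the farthest positive
hard_poss = toy_poss[toy_poss > toy_negs.min()]  # tensor([0.8000, 0.3000]): farther than the closest negative
toy_loss = hard_poss.pow(2).sum() + F.relu(0.5 - hard_negs).pow(2).sum()  # 0.64 + 0.09 + 0.09 ~ 0.82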
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
# frozen_stages must be in range(-1, len(arch_setting) + 1)
CSPDarknet(frozen_stages=6)
with pytest.raises(AssertionError):
# each index in out_indices must be in range(len(arch_setting) + 1)
CSPDarknet(out_indices=[6])
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = CSPDarknet(frozen_stages=frozen_stages)
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test CSPDarknet with norm_eval=True
model = CSPDarknet(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test CSPDarknet-P5 forward with widen_factor=0.25
model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
# Test CSPDarknet-P6 forward with widen_factor=0.25
model = CSPDarknet(
arch='P6',
widen_factor=0.25,
out_indices=range(0, 6),
spp_kernal_sizes=(3, 5, 7))
model.train()
imgs = torch.randn(1, 3, 128, 128)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 16, 64, 64))
assert feat[1].shape == torch.Size((1, 32, 32, 32))
assert feat[2].shape == torch.Size((1, 64, 16, 16))
assert feat[3].shape == torch.Size((1, 128, 8, 8))
assert feat[4].shape == torch.Size((1, 192, 4, 4))
assert feat[5].shape == torch.Size((1, 256, 2, 2))
# Test CSPDarknet forward with dict(type='ReLU')
model = CSPDarknet(
widen_factor=0.125, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with BatchNorm forward
model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with custom arch forward
arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
[224, 512, 1, True, False]]
model = CSPDarknet(
arch_ovewrite=arch_ovewrite,
widen_factor=0.25,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 8, 16, 16))
assert feat[1].shape == torch.Size((1, 14, 8, 8))
assert feat[2].shape == torch.Size((1, 56, 4, 4))
assert feat[3].shape == torch.Size((1, 128, 2, 2))
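# A small helper sketch (not from mmdet; the base widths below are inferred
# from the shape assertions above) showing how widen_factor scales the
# per-stage output channels of the P5 architecture.
P5_BASE_CHANNELS = (64, 128, 256, 512, 1024)
def scaled_channels(widen_factor, base=P5_BASE_CHANNELS):
    # Each stage's channel count is the base width scaled by widen_factor.
    return tuple(int(c * widen_factor) for c in base)
assert scaled_channels(0.25) == (16, 32, 64, 128, 256)  # matches the P5 test above
assert scaled_channels(0.125) == (8, 16, 32, 64, 128)   # matches the ReLU/BatchNorm tests above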
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
# frozen_stages must be in range(-1, len(arch_setting) + 1)
CSPDarknet(frozen_stages=6)
with pytest.raises(AssertionError):
# each index in out_indices must be in range(len(arch_setting) + 1)
CSPDarknet(out_indices=[6])
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = CSPDarknet(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test CSPDarknet with norm_eval=True
model = CSPDarknet(norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test CSPDarknet-P5 forward with widen_factor=1.0
model = CSPDarknet(arch='P5', widen_factor=1.0, out_indices=range(0, 5))
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet-P5 forward with widen_factor=0.5
model = CSPDarknet(arch='P5', widen_factor=0.5, out_indices=range(0, 5))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 64, 56, 56))
assert feat[2].shape == torch.Size((1, 128, 28, 28))
assert feat[3].shape == torch.Size((1, 256, 14, 14))
assert feat[4].shape == torch.Size((1, 512, 7, 7))
# Test CSPDarknet-P6 forward with widen_factor=1.5
model = CSPDarknet(
arch='P6',
widen_factor=1.5,
out_indices=range(0, 6),
spp_kernal_sizes=(3, 5, 7))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 320, 320)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 96, 160, 160))
assert feat[1].shape == torch.Size((1, 192, 80, 80))
assert feat[2].shape == torch.Size((1, 384, 40, 40))
assert feat[3].shape == torch.Size((1, 768, 20, 20))
assert feat[4].shape == torch.Size((1, 1152, 10, 10))
assert feat[5].shape == torch.Size((1, 1536, 5, 5))
# Test CSPDarknet forward with dict(type='ReLU')
model = CSPDarknet(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet with BatchNorm forward
model = CSPDarknet(widen_factor=1.0, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet with custom arch forward
arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
[224, 512, 1, True, False]]
model = CSPDarknet(
arch_ovewrite=arch_ovewrite,
widen_factor=1.0,
out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 56, 56, 56))
assert feat[2].shape == torch.Size((1, 224, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
|
import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
# Ignore S113 since we don't need a timeout here as the request
# should fail immediately
requests.get("https://www.example.com") # noqa: S113
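# A companion sketch (assumption: pytest-socket is active for the test run,
# e.g. via --disable-socket): blocking is reversible per test.
def test_socket_reenabled() -> None:
    """Sockets can be selectively re-enabled after being blocked."""
    import socket
    pytest_socket.disable_socket()  # from here on, socket.socket() raises SocketBlockedError
    pytest_socket.enable_socket()   # restore normal socket creation
    socket.socket().close()         # succeeds again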
|
import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
# noqa since we don't need a timeout here as the request should fail immediately
requests.get("https://www.example.com") # noqa: S113
|
"""Tool for agent to sleep."""
from asyncio import sleep as asleep
from time import sleep
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
class SleepInput(BaseModel):
"""Input for CopyFileTool."""
sleep_time: int = Field(..., description="Time to sleep in seconds")
class SleepTool(BaseTool):
"""Tool that adds the capability to sleep."""
name: str = "sleep"
args_schema: Type[BaseModel] = SleepInput
description: str = "Make agent sleep for a specified number of seconds."
def _run(
self,
sleep_time: int,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Sleep tool."""
sleep(sleep_time)
return f"Agent slept for {sleep_time} seconds."
async def _arun(
self,
sleep_time: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the sleep tool asynchronously."""
await asleep(sleep_time)
return f"Agent slept for {sleep_time} seconds."
|
"""Tool for agent to sleep."""
from asyncio import sleep as asleep
from time import sleep
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
class SleepInput(BaseModel):
"""Input for CopyFileTool."""
sleep_time: int = Field(..., description="Time to sleep in seconds")
class SleepTool(BaseTool): # type: ignore[override]
"""Tool that adds the capability to sleep."""
name: str = "sleep"
args_schema: Type[BaseModel] = SleepInput
description: str = "Make agent sleep for a specified number of seconds."
def _run(
self,
sleep_time: int,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Sleep tool."""
sleep(sleep_time)
return f"Agent slept for {sleep_time} seconds."
async def _arun(
self,
sleep_time: int,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the sleep tool asynchronously."""
await asleep(sleep_time)
return f"Agent slept for {sleep_time} seconds."
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .runner import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .utils import *
|
from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info(f"Accuracy: {accuracy:.4f} ({correct}/{total})\n")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics, epoch, steps)
return metrics
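# A minimal usage sketch (assumptions: the tiny two-example dataset and the
# "all-MiniLM-L6-v2" checkpoint are illustrative placeholders only).
if __name__ == "__main__":
    from sentence_transformers import InputExample, SentenceTransformer, losses
    model = SentenceTransformer("all-MiniLM-L6-v2")
    examples = [
        InputExample(texts=["A cat.", "A feline."], label=0),
        InputExample(texts=["A dog.", "A car."], label=1),
    ]
    dataloader = DataLoader(examples, shuffle=False, batch_size=2)
    softmax_loss = losses.SoftmaxLoss(
        model=model,
        sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
        num_labels=2,
    )
    evaluator = LabelAccuracyEvaluator(dataloader, name="dev", softmax_model=softmax_loss)
    print(evaluator(model, output_path="."))  # writes accuracy_evaluation_dev_results.csv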
|
from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info(f"Accuracy: {accuracy:.4f} ({correct}/{total})\n")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics, epoch, steps)
return metrics
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
:param as_chunks: when multiple geometries are stored in one mesh file,
then store each geometry in a separate Document in :attr:`.chunks`
:return: itself after processing
"""
import trimesh
import urllib.parse
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
if as_chunks:
from .. import Document
# try to coerce everything into a scene
scene = loader(self.uri, force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=np.array(geo.sample(samples))))
else:
# combine a scene into a single mesh
mesh = loader(self.uri, force='mesh')
self.tensor = np.array(mesh.sample(samples))
return self
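# A minimal usage sketch (assumptions: "bunny.glb" is a placeholder mesh file
# on disk, and Document is docarray's class that mixes this mixin in).
if __name__ == "__main__":
    from docarray import Document
    doc = Document(uri="bunny.glb")
    doc.load_uri_to_point_cloud_tensor(samples=1000)  # doc.tensor -> (1000, 3) ndarray
    doc.load_uri_to_point_cloud_tensor(1000, as_chunks=True)  # one chunk Document per geometry
    print(doc.tensor.shape, len(doc.chunks))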
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ...typing import T
class MeshDataMixin:
"""Provide helper functions for :class:`Document` to support 3D mesh data and point cloud."""
def load_uri_to_point_cloud_tensor(
self: 'T', samples: int, as_chunks: bool = False
) -> 'T':
"""Convert a 3d mesh-like :attr:`.uri` into :attr:`.tensor`
:param samples: number of points to sample from the mesh
:param as_chunks: when multiple geometries are stored in one mesh file,
then store each geometry in a separate Document in :attr:`.chunks`
:return: itself after processing
"""
import trimesh
import urllib.parse
scheme = urllib.parse.urlparse(self.uri).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
if as_chunks:
from .. import Document
# try to coerce everything into a scene
scene = loader(self.uri, force='scene')
for geo in scene.geometry.values():
geo: trimesh.Trimesh
self.chunks.append(Document(tensor=geo.sample(samples)))
else:
# combine a scene into a single mesh
mesh = loader(self.uri, force='mesh')
self.tensor = mesh.sample(samples)
return self
|
import importlib
class LazyModule:
def __init__(self, name, pip_name=None):
self.name = name
pip_name = pip_name or name
self.pip_name = pip_name
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
def __repr__(self):
return f"LazyModule({self.name})"
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torchvision = LazyModule("torchvision")
optree = LazyModule("optree")
dmtree = LazyModule("tree")
|
import importlib
class LazyModule:
def __init__(self, name, pip_name=None):
self.name = name
pip_name = pip_name or name
self.pip_name = pip_name
self.module = None
self._available = None
@property
def available(self):
if self._available is None:
try:
self.initialize()
self._available = True
except ImportError:
self._available = False
return self._available
def initialize(self):
try:
self.module = importlib.import_module(self.name)
except ImportError:
raise ImportError(
f"This requires the {self.name} module. "
f"You can install it via `pip install {self.pip_name}`"
)
def __getattr__(self, name):
if name == "_api_export_path":
raise AttributeError
if self.module is None:
self.initialize()
return getattr(self.module, name)
tensorflow = LazyModule("tensorflow")
gfile = LazyModule("tensorflow.io.gfile", pip_name="tensorflow")
tensorflow_io = LazyModule("tensorflow_io")
scipy = LazyModule("scipy")
jax = LazyModule("jax")
torchvision = LazyModule("torchvision")
optree = LazyModule("optree")
dmtree = LazyModule("tree")
|
import fastapi
from .config import Settings
from .middleware import auth_middleware
from .models import DEFAULT_USER_ID, User
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fastapi.Depends(auth_middleware),
) -> User:
return verify_user(payload, admin_only=True)
def verify_user(payload: dict | None, admin_only: bool) -> User:
if not payload:
if Settings.ENABLE_AUTH:
raise fastapi.HTTPException(
status_code=401, detail="Authorization header is missing"
)
# This handles the case when authentication is disabled
payload = {"sub": DEFAULT_USER_ID, "role": "admin"}
user_id = payload.get("sub")
if not user_id:
raise fastapi.HTTPException(
status_code=401, detail="User ID not found in token"
)
if admin_only and payload["role"] != "admin":
raise fastapi.HTTPException(status_code=403, detail="Admin access required")
return User.from_payload(payload)
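# A minimal wiring sketch (assumptions: the FastAPI app, the routes, and the
# `user.id` attribute are illustrative; only the Depends(...) usage is the point).
app = fastapi.FastAPI()

@app.get("/me")
def read_me(user: User = fastapi.Depends(requires_user)) -> dict:
    # Any authenticated user (or the default user when auth is disabled).
    return {"user_id": user.id}  # `id` is an assumed User attribute

@app.delete("/users/{user_id}")
def delete_user(user_id: str, admin: User = fastapi.Depends(requires_admin_user)) -> dict:
    # Reached only when the token's role is "admin"; others get a 403 above.
    return {"deleted": user_id}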
|
import fastapi
from .middleware import auth_middleware
from .models import User, DEFAULT_USER_ID, DEFAULT_EMAIL
from .config import Settings
def requires_user(payload: dict = fastapi.Depends(auth_middleware)) -> User:
return verify_user(payload, admin_only=False)
def requires_admin_user(
payload: dict = fastapi.Depends(auth_middleware),
) -> User:
return verify_user(payload, admin_only=True)
def verify_user(payload: dict | None, admin_only: bool) -> User:
if not payload:
if Settings.ENABLE_AUTH:
raise fastapi.HTTPException(
status_code=401, detail="Authorization header is missing"
)
# This handles the case when authentication is disabled
payload = {"sub": DEFAULT_USER_ID, "role": "admin"}
user_id = payload.get("sub")
if not user_id:
raise fastapi.HTTPException(
status_code=401, detail="User ID not found in token"
)
if admin_only and payload["role"] != "admin":
raise fastapi.HTTPException(status_code=403, detail="Admin access required")
return User.from_payload(payload)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, BaseNode):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(
cls: Type[T], value: Union[T, Any], field: 'ModelField', config: 'BaseConfig'
) -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
else:
try:
arr: np.ndarray = np.asarray(value)  # coerce array-likes; np.ndarray(value) would misread the input as a shape
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to be
converted into a protobuf
:param field: field in which to store the content in the node proto
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self._flush_tensor_to_proto(nd_proto, value=self)
return NodeProto(**{field: nd_proto})
@classmethod
def _read_from_proto(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def _flush_tensor_to_proto(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
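# A minimal round-trip sketch (the (2, 3) float32 array is an arbitrary example).
if __name__ == "__main__":
    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    t = Tensor.from_ndarray(arr)  # zero-copy view carrying the Tensor type
    proto = NdArrayProto()
    Tensor._flush_tensor_to_proto(proto, value=t)  # fills dense.buffer/shape/dtype
    restored = Tensor._read_from_proto(proto)      # rebuilds the array from the proto
    assert np.array_equal(restored, arr)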
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, BaseNode):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(
cls: Type[T], value: Union[T, Any], field: 'ModelField', config: 'BaseConfig'
) -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
else:
try:
arr: np.ndarray = np.asarray(value)  # coerce array-likes; np.ndarray(value) would misread the input as a shape
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
def _to_node_protobuf(self: T) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self.flush_ndarray(nd_proto, value=self)
return NodeProto(tensor=nd_proto)
@classmethod
def read_ndarray(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def flush_ndarray(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import CascadeRoIHead
from .double_roi_head import DoubleHeadRoIHead
from .dynamic_roi_head import DynamicRoIHead
from .grid_roi_head import GridRoIHead
from .htc_roi_head import HybridTaskCascadeRoIHead
from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
FusedSemanticHead, GlobalContextHead, GridHead,
HTCMaskHead, MaskIoUHead, MaskPointHead,
SCNetMaskHead, SCNetSemanticHead)
from .mask_scoring_roi_head import MaskScoringRoIHead
from .multi_instance_roi_head import MultiInstanceRoIHead
from .pisa_roi_head import PISARoIHead
from .point_rend_roi_head import PointRendRoIHead
from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,
SingleRoIExtractor)
from .scnet_roi_head import SCNetRoIHead
from .shared_heads import ResLayer
from .sparse_roi_head import SparseRoIHead
from .standard_roi_head import StandardRoIHead
from .trident_roi_head import TridentRoIHead
__all__ = [
'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',
'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',
'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead',
'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead',
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',
'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
'FeatureRelayHead', 'GlobalContextHead', 'MultiInstanceRoIHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import CascadeRoIHead
from .double_roi_head import DoubleHeadRoIHead
from .dynamic_roi_head import DynamicRoIHead
from .grid_roi_head import GridRoIHead
from .htc_roi_head import HybridTaskCascadeRoIHead
from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
FusedSemanticHead, GlobalContextHead, GridHead,
HTCMaskHead, MaskIoUHead, MaskPointHead,
SCNetMaskHead, SCNetSemanticHead)
from .mask_scoring_roi_head import MaskScoringRoIHead
from .pisa_roi_head import PISARoIHead
from .point_rend_roi_head import PointRendRoIHead
from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,
SingleRoIExtractor)
from .scnet_roi_head import SCNetRoIHead
from .shared_heads import ResLayer
from .sparse_roi_head import SparseRoIHead
from .standard_roi_head import StandardRoIHead
from .trident_roi_head import TridentRoIHead
__all__ = [
'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',
'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',
'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead',
'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead',
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',
'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
'FeatureRelayHead', 'GlobalContextHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .quasi_dense_tracker import QuasiDenseTracker
__all__ = ['BaseTracker', 'ByteTracker', 'QuasiDenseTracker']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
__all__ = ['BaseTracker', 'ByteTracker']
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require
# The values are (version_spec, comma-separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
"scikit-image": ("0.17.2", "docs, examples, tests"),
"pandas": ("1.2.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.9", "tests"),
"pyamg": ("4.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("7.1.2", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
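# A short illustration (not executed; assuming this script is saved as
# min_dependencies.py) of the inverse mapping and the CLI above:
#   tag_to_packages["build"] == ["numpy>=1.19.5", "scipy>=1.6.0",
#                                "cython>=3.0.10", "meson-python>=0.16.0"]
#   $ python min_dependencies.py numpy
#   1.19.5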
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require
# The values are (version_spec, comma-separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
"scikit-image": ("0.17.2", "docs, examples, tests"),
"pandas": ("1.2.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.9", "tests"),
"pyamg": ("4.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("7.1.2", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.6", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionXLKDiffusionPipeline
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
dtype = torch.float16
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_stable_diffusion_xl(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_euler")
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=9.0,
num_inference_steps=2,
height=512,
width=512,
output_type="np",
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.5420, 0.5038, 0.2439, 0.5371, 0.4660, 0.1906, 0.5221, 0.4290, 0.2566])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_stable_diffusion_karras_sigmas(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_dpmpp_2m")
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=7.5,
num_inference_steps=2,
output_type="np",
use_karras_sigmas=True,
height=512,
width=512,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.6418, 0.6424, 0.6462, 0.6271, 0.6314, 0.6295, 0.6249, 0.6339, 0.6335])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_stable_diffusion_noise_sampler_seed(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_dpmpp_sde")
prompt = "A painting of a squirrel eating a burger"
seed = 0
images1 = sd_pipe(
[prompt],
generator=torch.manual_seed(seed),
noise_sampler_seed=seed,
guidance_scale=9.0,
num_inference_steps=2,
output_type="np",
height=512,
width=512,
).images
images2 = sd_pipe(
[prompt],
generator=torch.manual_seed(seed),
noise_sampler_seed=seed,
guidance_scale=9.0,
num_inference_steps=2,
output_type="np",
height=512,
width=512,
).images
assert images1.shape == (1, 512, 512, 3)
assert images2.shape == (1, 512, 512, 3)
assert np.abs(images1.flatten() - images2.flatten()).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionXLKDiffusionPipeline
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionXLKPipelineIntegrationTests(unittest.TestCase):
dtype = torch.float16
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_xl(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_euler")
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=9.0,
num_inference_steps=2,
height=512,
width=512,
output_type="np",
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.5420, 0.5038, 0.2439, 0.5371, 0.4660, 0.1906, 0.5221, 0.4290, 0.2566])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_stable_diffusion_karras_sigmas(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_dpmpp_2m")
prompt = "A painting of a squirrel eating a burger"
generator = torch.manual_seed(0)
output = sd_pipe(
[prompt],
generator=generator,
guidance_scale=7.5,
num_inference_steps=2,
output_type="np",
use_karras_sigmas=True,
height=512,
width=512,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.6418, 0.6424, 0.6462, 0.6271, 0.6314, 0.6295, 0.6249, 0.6339, 0.6335])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_stable_diffusion_noise_sampler_seed(self):
sd_pipe = StableDiffusionXLKDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=self.dtype
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
sd_pipe.set_scheduler("sample_dpmpp_sde")
prompt = "A painting of a squirrel eating a burger"
seed = 0
images1 = sd_pipe(
[prompt],
generator=torch.manual_seed(seed),
noise_sampler_seed=seed,
guidance_scale=9.0,
num_inference_steps=2,
output_type="np",
height=512,
width=512,
).images
images2 = sd_pipe(
[prompt],
generator=torch.manual_seed(seed),
noise_sampler_seed=seed,
guidance_scale=9.0,
num_inference_steps=2,
output_type="np",
height=512,
width=512,
).images
assert images1.shape == (1, 512, 512, 3)
assert images2.shape == (1, 512, 512, 3)
assert np.abs(images1.flatten() - images2.flatten()).max() < 1e-2
|
import shutil
import tempfile
import unittest
from transformers import Owlv2Processor
from transformers.testing_utils import require_scipy
from ...test_processing_common import ProcessorTesterMixin
@require_scipy
class Owlv2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = Owlv2Processor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
processor = cls.processor_class.from_pretrained("google/owlv2-base-patch16-ensemble")
processor.save_pretrained(cls.tmpdirname)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
|
import shutil
import tempfile
import unittest
import pytest
from transformers import Owlv2Processor
from transformers.testing_utils import require_scipy
from ...test_processing_common import ProcessorTesterMixin
@require_scipy
class Owlv2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = Owlv2Processor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
processor = cls.processor_class.from_pretrained("google/owlv2-base-patch16-ensemble")
processor.save_pretrained(cls.tmpdirname)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
def test_processor_query_images_positional(self):
processor_components = self.prepare_components()
processor = Owlv2Processor(**processor_components)
image_input = self.prepare_image_inputs()
query_images = self.prepare_image_inputs()
inputs = processor(None, image_input, query_images)
self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
|