from ..utils import is_torch_available
if is_torch_available():
from .group_offloading import apply_group_offloading
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
|
from ..utils import is_torch_available
if is_torch_available():
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
|
from typing import Optional
import torch
from docarray import BaseDoc, DocList
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(BaseDoc):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
batch = DocList[Mmdoc](Mmdoc(text=f'hello{i}') for i in range(N))
batch.tensor = torch.zeros(N, 3, 224, 224)
batch = batch.to_doc_vec()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model = Model()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for _ in range(2):
    opt.zero_grad()  # reset gradients accumulated by the previous step
    loss = model(batch.tensor).sum()
    loss.backward()
    opt.step()
|
from typing import Optional
import torch
from docarray import BaseDoc, DocList
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(BaseDoc):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
batch = DocList[Mmdoc](Mmdoc(text=f'hello{i}') for i in range(N))
batch.tensor = torch.zeros(N, 3, 224, 224)
batch = batch.stack()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model = Model()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for _ in range(2):
    opt.zero_grad()  # reset gradients accumulated by the previous step
    loss = model(batch.tensor).sum()
    loss.backward()
    opt.step()
|
"""DeepInfra API base URL."""
API_BASE = "https://api.deepinfra.com"
"""DeepInfra Inference API endpoint."""
INFERENCE_ENDPOINT = "v1/openai/completions"
"""Chat API endpoint for DeepInfra."""
CHAT_API_ENDPOINT = "v1/openai/chat/completions"
"""Environment variable name of DeepInfra API token."""
ENV_VARIABLE = "DEEPINFRA_API_TOKEN"
"""Default model name for DeepInfra embeddings."""
DEFAULT_MODEL_NAME = "mistralai/Mixtral-8x22B-Instruct-v0.1"
"""Default max tokens. DeepInfra documentation constant is set."""
DEFAULT_MAX_TOKENS = 512
"""Tool choice is auto."""
TOOL_CHOICE = "auto"
"""User agent"""
USER_AGENT = "llama-index-llms-deepinfra"
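# --- A minimal usage sketch (not part of the original module): how these
# --- constants might compose into a request URL and auth headers. The
# --- `build_headers` helper below is hypothetical.
import os

def build_headers() -> dict:
    """Assemble auth headers from the DEEPINFRA_API_TOKEN environment variable."""
    token = os.environ.get(ENV_VARIABLE, "")
    return {"Authorization": f"Bearer {token}", "User-Agent": USER_AGENT}

CHAT_URL = f"{API_BASE}/{CHAT_API_ENDPOINT}"  # .../v1/openai/chat/completions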
|
"""DeepInfra API base URL."""
API_BASE = "https://api.deepinfra.com"
"""DeepInfra Inference API endpoint."""
INFERENCE_ENDPOINT = "v1/openai/completions"
"""Chat API endpoint for DeepInfra."""
CHAT_API_ENDPOINT = "v1/openai/chat/completions"
"""Environment variable name of DeepInfra API token."""
ENV_VARIABLE = "DEEPINFRA_API_TOKEN"
"""Default model name for DeepInfra embeddings."""
DEFAULT_MODEL_NAME = "mistralai/Mixtral-8x22B-Instruct-v0.1"
"""Default max tokens. DeepInfra documentation constant is set."""
DEFAULT_MAX_TOKENS = 512
"""Tool choice is auto."""
TOOL_CHOICE = "auto"
"""User agent"""
USER_AGENT = "llama-index-llms-deepinfra"
|
from .tensor import Tensor
Embedding = Tensor
|
import numpy as np
from .tensor import Tensor
Embedding = Tensor
|
import inspect
from keras.src.api_export import keras_export
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import fake_quant_with_min_max_vars
from keras.src.quantizers.quantizers import pack_int4
from keras.src.quantizers.quantizers import quantize_and_dequantize
from keras.src.quantizers.quantizers import unpack_int4
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {Quantizer, AbsMaxQuantizer}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
@keras_export("keras.quantizers.serialize")
def serialize(initializer):
    """Return the config-based serialization of a Keras quantizer object."""
    return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.quantizers.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras quantizer object via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.quantizers.get")
def get(identifier, **kwargs):
"""Retrieve a Keras quantizer object via an identifier."""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj(**kwargs)  # instantiate classes with any forwarded keyword arguments
return obj
else:
raise ValueError(
f"Could not interpret quantizer identifier: {identifier}"
)
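# Usage sketch for the resolver above (assumes a standard Keras install and the
# `obj = obj(**kwargs)` fix; illustrative, not part of the original file):
from keras import quantizers

q = quantizers.get("abs_max_quantizer", axis=-1)  # snake_case string lookup
cfg = quantizers.serialize(q)                     # round-trip through a config dict
q2 = quantizers.get(cfg)                          # dict identifiers go through deserialize()
assert quantizers.get(None) is None               # None passes through unchanged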
|
import inspect
from keras.src.api_export import keras_export
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import fake_quant_with_min_max_vars
from keras.src.quantizers.quantizers import quantize_and_dequantize
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {Quantizer, AbsMaxQuantizer}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
@keras_export("keras.quantizers.serialize")
def serialize(initializer):
    """Return the config-based serialization of a Keras quantizer object."""
    return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.quantizers.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras quantizer object via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.quantizers.get")
def get(identifier, **kwargs):
"""Retrieve a Keras quantizer object via an identifier."""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj(**kwargs)  # instantiate classes with any forwarded keyword arguments
return obj
else:
raise ValueError(
f"Could not interpret quantizer identifier: {identifier}"
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import NucliaTextTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NucliaTextTransformer": "langchain_community.document_transformers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NucliaTextTransformer",
]
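# Sketch of the effect of the lookup table above: attribute access for a
# deprecated name goes through __getattr__, which emits a deprecation warning
# and forwards to the new location, so `NucliaTextTransformer` still resolves
# to the class now living in langchain_community.document_transformers.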
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import NucliaTextTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NucliaTextTransformer": "langchain_community.document_transformers"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NucliaTextTransformer",
]
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scripts/download_full.sh',
cwd=Path(__file__).parents[1],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
yield
shutil.rmtree(Path(__file__).parents[1] / '.cache')
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import shutil
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
subprocess.run(
'scripts/download_full.sh',
cwd=Path(__file__).parents[1],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
yield
shutil.rmtree(Path(__file__).parents[1] / '.cache')
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .hifi_gan import (
hifigan_generator,
hifigan_generator_v1,
hifigan_generator_v2,
hifigan_generator_v3,
HiFiGANGenerator,
)
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
"HiFiGANGenerator",
"hifigan_generator_v1",
"hifigan_generator_v2",
"hifigan_generator_v3",
"hifigan_generator",
]
|
from ._conformer_wav2vec2 import (
conformer_wav2vec2_base,
conformer_wav2vec2_model,
conformer_wav2vec2_pretrain_base,
conformer_wav2vec2_pretrain_large,
conformer_wav2vec2_pretrain_model,
ConformerWav2Vec2PretrainModel,
)
from ._emformer_hubert import emformer_hubert_base, emformer_hubert_model
from .conv_emformer import ConvEmformer
from .rnnt import conformer_rnnt_base, conformer_rnnt_model
__all__ = [
"conformer_rnnt_base",
"conformer_rnnt_model",
"ConvEmformer",
"conformer_wav2vec2_model",
"conformer_wav2vec2_base",
"conformer_wav2vec2_pretrain_model",
"conformer_wav2vec2_pretrain_base",
"conformer_wav2vec2_pretrain_large",
"ConformerWav2Vec2PretrainModel",
"emformer_hubert_base",
"emformer_hubert_model",
]
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments, losses
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = losses.SpladeLoss(
model=model,
main_loss=losses.SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
lambda_query=lambda_query, # Weight for query loss
        lambda_corpus=lambda_corpus,  # Weight for document loss
    )
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
learning_rate=4e-5,
run_name=run_name,
seed=42,
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(f"sparse-embedding/{run_name}", private=True)
if __name__ == "__main__":
main()
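# For intuition on the lambda weights above: SpladeLoss adds sparsity
# regularization on top of the main ranking loss. A standalone sketch of the
# FLOPS regularizer from the SPLADE papers (an assumption about the library's
# internal formulation, accurate only up to implementation details):
import torch

def flops_regularizer(embeddings: torch.Tensor) -> torch.Tensor:
    # Squared mean absolute activation per vocabulary dimension, summed over
    # the vocabulary; minimizing it pushes embeddings toward sparsity.
    return torch.sum(torch.mean(torch.abs(embeddings), dim=0) ** 2)

# e.g. total = main_loss + lambda_query * flops_regularizer(query_embs)
#            + lambda_corpus * flops_regularizer(doc_embs)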
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = SpladeLoss(
model=model,
main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
lambda_query=lambda_query, # Weight for query loss
        lambda_corpus=lambda_corpus,  # Weight for document loss
    )
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
learning_rate=4e-5,
run_name=run_name,
seed=42,
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(f"sparse-embedding/{run_name}", private=True)
if __name__ == "__main__":
main()
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='ndarray_embedding')
class NdArrayEmbedding(NdArray, EmbeddingMixin):
alternative_type = NdArray
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='ndarray_embedding')
class NdArrayEmbedding(NdArray, EmbeddingMixin):
alternative_type = NdArray
|
from typing import Any, Union
from ..utils import add_end_docstrings, is_vision_available
from .base import GenericTensor, Pipeline, build_pipeline_init_args
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
@add_end_docstrings(
build_pipeline_init_args(has_image_processor=True),
"""
image_processor_kwargs (`dict`, *optional*):
Additional dictionary of keyword arguments passed along to the image processor e.g.
{"size": {"height": 100, "width": 100}}
pool (`bool`, *optional*, defaults to `False`):
Whether or not to return the pooled output. If `False`, the model will return the raw hidden states.
""",
)
class ImageFeatureExtractionPipeline(Pipeline):
"""
Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
>>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True)
>>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input image.
torch.Size([1, 197, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"image-feature-extraction"`.
All vision models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, pool=None, **kwargs):
preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs
postprocess_params = {}
if pool is not None:
postprocess_params["pool"] = pool
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
return preprocess_params, {}, postprocess_params
def preprocess(self, image, timeout=None, **image_processor_kwargs) -> dict[str, GenericTensor]:
image = load_image(image, timeout=timeout)
model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, pool=None, return_tensors=False):
pool = pool if pool is not None else False
if pool:
if "pooler_output" not in model_outputs:
raise ValueError(
"No pooled output was returned. Make sure the model has a `pooler` layer when using the `pool` option."
)
outputs = model_outputs["pooler_output"]
else:
# [0] is the first available tensor, logits or last_hidden_state.
outputs = model_outputs[0]
if return_tensors:
return outputs
if self.framework == "pt":
return outputs.tolist()
elif self.framework == "tf":
return outputs.numpy().tolist()
def __call__(self, *args: Union[str, "Image.Image", list["Image.Image"], list[str]], **kwargs: Any) -> list[Any]:
"""
Extract the features of the input(s).
Args:
images (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
The pipeline handles three types of images:
- A string containing an HTTP link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. Images in a batch must all be
in the same format: all as HTTP links, all as local paths, or all as PIL images.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
the call may block forever.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
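# Hedged usage sketch for the `pool` flag documented above (assumes the chosen
# checkpoint exposes a pooler layer; illustrative only, not part of this file):
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
    pooled = extractor(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        pool=True,
        return_tensors=True,
    )  # [1, hidden_dimension] rather than the [1, sequence_length, hidden_dimension] hidden states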
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_vision_available
from .base import GenericTensor, Pipeline, build_pipeline_init_args
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
@add_end_docstrings(
build_pipeline_init_args(has_image_processor=True),
"""
image_processor_kwargs (`dict`, *optional*):
Additional dictionary of keyword arguments passed along to the image processor e.g.
{"size": {"height": 100, "width": 100}}
pool (`bool`, *optional*, defaults to `False`):
Whether or not to return the pooled output. If `False`, the model will return the raw hidden states.
""",
)
class ImageFeatureExtractionPipeline(Pipeline):
"""
Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
>>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True)
>>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input image.
torch.Size([1, 197, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"image-feature-extraction"`.
All vision models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, pool=None, **kwargs):
preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs
postprocess_params = {}
if pool is not None:
postprocess_params["pool"] = pool
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
return preprocess_params, {}, postprocess_params
def preprocess(self, image, timeout=None, **image_processor_kwargs) -> Dict[str, GenericTensor]:
image = load_image(image, timeout=timeout)
model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs)
if self.framework == "pt":
model_inputs = model_inputs.to(self.torch_dtype)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, pool=None, return_tensors=False):
pool = pool if pool is not None else False
if pool:
if "pooler_output" not in model_outputs:
raise ValueError(
"No pooled output was returned. Make sure the model has a `pooler` layer when using the `pool` option."
)
outputs = model_outputs["pooler_output"]
else:
# [0] is the first available tensor, logits or last_hidden_state.
outputs = model_outputs[0]
if return_tensors:
return outputs
if self.framework == "pt":
return outputs.tolist()
elif self.framework == "tf":
return outputs.numpy().tolist()
def __call__(self, *args: Union[str, "Image.Image", List["Image.Image"], List[str]], **kwargs: Any) -> List[Any]:
"""
Extract the features of the input(s).
Args:
images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
The pipeline handles three types of images:
- A string containing an HTTP link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. Images in a batch must all be
in the same format: all as HTTP links, all as local paths, or all as PIL images.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and
the call may block forever.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
|
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
The margin is an important hyperparameter that needs to be tuned for the task at hand.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
The margin is an important hyperparameter that needs to be tuned for the task at hand.
:param model: SentenceTransformerModel
:param distance_metric: Function to compute distance between two embeddings. The class TripletDistanceMetric
contains common distance metrics that can be used.
:param triplet_margin: The negative should be at least this much further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
InputExample(texts=['Anchor 1', 'Positive 1', 'Negative 1']),
InputExample(texts=['Anchor 2', 'Positive 2', 'Negative 2']),
]
train_batch_size = 1
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from langchain_core.language_models import (
BaseLanguageModel,
LanguageModelInput,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.base import _get_token_ids_default_method
__all__ = [
"BaseLanguageModel",
"LanguageModelInput",
"LanguageModelOutput",
"_get_token_ids_default_method",
"get_tokenizer",
]
|
from langchain_core.language_models import (
BaseLanguageModel,
LanguageModelInput,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.base import _get_token_ids_default_method
__all__ = [
"get_tokenizer",
"BaseLanguageModel",
"_get_token_ids_default_method",
"LanguageModelInput",
"LanguageModelOutput",
]
|
_base_ = './mask-rcnn_r50-contrib_fpn_gn-all_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = './mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py'
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_text import DPRTextEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=DPRTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_text import DPRTextEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=DPRTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workspace/.cache',
],
timeout=30,
check=True,
)
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDocument):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDocument):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
remote_url = self.startswith('http')
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import pytest
from xgboost.testing.federated import run_federated_learning
@pytest.mark.parametrize("with_ssl", [True, False])
def test_federated_learning(with_ssl: bool) -> None:
run_federated_learning(with_ssl, False, __file__)
|
#!/usr/bin/python
import multiprocessing
import sys
import time
import xgboost as xgb
import xgboost.federated
SERVER_KEY = 'server-key.pem'
SERVER_CERT = 'server-cert.pem'
CLIENT_KEY = 'client-key.pem'
CLIENT_CERT = 'client-cert.pem'
def run_server(port: int, world_size: int, with_ssl: bool) -> None:
if with_ssl:
xgboost.federated.run_federated_server(port, world_size, SERVER_KEY, SERVER_CERT,
CLIENT_CERT)
else:
xgboost.federated.run_federated_server(port, world_size)
def run_worker(port: int, world_size: int, rank: int, with_ssl: bool, with_gpu: bool) -> None:
communicator_env = {
'xgboost_communicator': 'federated',
'federated_server_address': f'localhost:{port}',
'federated_world_size': world_size,
'federated_rank': rank
}
if with_ssl:
communicator_env['federated_server_cert'] = SERVER_CERT
communicator_env['federated_client_key'] = CLIENT_KEY
communicator_env['federated_client_cert'] = CLIENT_CERT
# Always call this before using the distributed module
with xgb.collective.CommunicatorContext(**communicator_env):
# Load the data files; files are not sharded in federated mode.
dtrain = xgb.DMatrix('agaricus.txt.train-%02d?format=libsvm' % rank)
dtest = xgb.DMatrix('agaricus.txt.test-%02d?format=libsvm' % rank)
# Specify parameters via a map; definitions are the same as in the C++ version
param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
if with_gpu:
param['tree_method'] = 'hist'
param['device'] = f"cuda:{rank}"
# Specify validation sets to watch performance
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 20
# Run training; all the features of the training API are available.
bst = xgb.train(param, dtrain, num_round, evals=watchlist,
early_stopping_rounds=2)
# Save the model; only process 0 saves it.
if xgb.collective.get_rank() == 0:
bst.save_model("test.model.json")
xgb.collective.communicator_print("Finished training\n")
def run_federated(with_ssl: bool = True, with_gpu: bool = False) -> None:
port = 9091
world_size = int(sys.argv[1])
server = multiprocessing.Process(target=run_server, args=(port, world_size, with_ssl))
server.start()
time.sleep(1)
if not server.is_alive():
raise Exception("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = multiprocessing.Process(target=run_worker,
args=(port, world_size, rank, with_ssl, with_gpu))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
server.terminate()
if __name__ == '__main__':
run_federated(with_ssl=True, with_gpu=False)
run_federated(with_ssl=False, with_gpu=False)
run_federated(with_ssl=True, with_gpu=True)
run_federated(with_ssl=False, with_gpu=True)
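# The demo above assumes the four PEM files already exist next to the script.
# A common way to generate self-signed pairs for local testing (an assumption,
# not taken from the original demo):
#
#   openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
#       -keyout server-key.pem -out server-cert.pem -subj "/CN=localhost"
#   openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
#       -keyout client-key.pem -out client-cert.pem -subj "/CN=localhost"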
|
_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../retinanet/retinanet_r50_caffe_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
from llama_index.core import Document, MockEmbedding, global_tokenizer
from llama_index.core.llms import MockLLM
from llama_index.packs.raptor.base import RaptorRetriever
import pytest
@pytest.mark.skipif(
condition=(global_tokenizer is None), reason="No global tokenizer set"
)
def test_raptor() -> None:
retriever = RaptorRetriever(
[
Document(text="one"),
Document(text="two"),
Document(text="three"),
Document(text="four"),
Document(text="five"),
Document(text="six"),
Document(text="seven"),
Document(text="eight"),
Document(text="nine"),
Document(text="ten"),
],
embed_model=MockEmbedding(embed_dim=1536),
llm=MockLLM(),
)
assert len(retriever.index.docstore.docs) == 13
nodes = retriever.retrieve("test", mode="collapsed")
assert len(nodes) == 2
nodes = retriever.retrieve("text", mode="tree_traversal")
assert len(nodes) == 5
|
from llama_index.core import Document, MockEmbedding, global_tokenizer
from llama_index.core.llms import MockLLM
from llama_index.packs.raptor.base import RaptorRetriever
import pytest
@pytest.mark.skipif(
condition=(global_tokenizer is None), reason="No global tokenizer set"
)
def test_raptor() -> None:
retriever = RaptorRetriever(
[
Document(text="one"),
Document(text="two"),
Document(text="three"),
Document(text="four"),
Document(text="five"),
Document(text="six"),
Document(text="seven"),
Document(text="eight"),
Document(text="nine"),
Document(text="ten"),
],
embed_model=MockEmbedding(embed_dim=1536),
llm=MockLLM(),
)
assert len(retriever.index.docstore.docs) == 13
nodes = retriever.retrieve("test", mode="collapsed")
assert len(nodes) == 2
nodes = retriever.retrieve("text", mode="tree_traversal")
assert len(nodes) == 5
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of AutoAssign. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of AutoAssign. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone config.
neck (:obj:`ConfigDict` or dict): The neck config.
bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of AutoAssign. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of AutoAssign. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import ChatOpenAI
class TestOpenAIResponses(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"use_responses_api": True}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"OPENAI_API_KEY": "api_key",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_BASE": "api_base",
"OPENAI_PROXY": "https://proxy.com",
},
{},
{
"openai_api_key": "api_key",
"openai_organization": "org_id",
"openai_api_base": "api_base",
"openai_proxy": "https://proxy.com",
},
)
|
"""Standard LangChain interface tests"""
from typing import Tuple, Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import ChatOpenAI
class TestOpenAIResponses(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"use_responses_api": True}
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"OPENAI_API_KEY": "api_key",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_BASE": "api_base",
"OPENAI_PROXY": "https://proxy.com",
},
{},
{
"openai_api_key": "api_key",
"openai_organization": "org_id",
"openai_api_base": "api_base",
"openai_proxy": "https://proxy.com",
},
)
|
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
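# The two schedule blocks above express the same 1x recipe, first in MMEngine
# 2.x style and then in legacy MMCV style. A rough field-by-field mapping (an
# interpretation of the migration, not taken from the source):
#
#   lr_config(warmup='linear', warmup_iters=500, warmup_ratio=0.001)
#       -> dict(type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500)
#   lr_config(policy='step', step=[8, 11])
#       -> dict(type='MultiStepLR', by_epoch=True, milestones=[8, 11], gamma=0.1)
#   runner = dict(type='EpochBasedRunner', max_epochs=12)
#       -> train_cfg = dict(by_epoch=True, max_epochs=12)
#
# The optimizer dict is unchanged between the two styles.
|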
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
"""
Computes the Cross Entropy Loss for a CrossEncoder model. This loss is used to train a model to predict the
correct class label for a given pair of sentences. The number of classes should be equal to the number of model
output labels.
Args:
model (:class:`~sentence_transformers.cross_encoder.CrossEncoder`): A CrossEncoder model to be trained.
activation_fct (:class:`~torch.nn.Module`): Activation function applied to the logits before computing the loss. Defaults to :class:`~torch.nn.Identity`.
**kwargs: Additional keyword arguments passed to the underlying :class:`torch.nn.CrossEntropyLoss`.
References:
- :class:`torch.nn.CrossEntropyLoss`
Requirements:
1. Your model can be initialized with `num_labels > 1` to predict multiple classes.
2. The number of dataset classes should be equal to the number of model output labels (`model.num_labels`).
Inputs:
+-------------------------------------------------+--------+-------------------------------+
| Texts | Labels | Number of Model Output Labels |
+=================================================+========+===============================+
| (sentence_A, sentence_B) pairs | class | `num_classes` |
+-------------------------------------------------+--------+-------------------------------+
Example:
::
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainer, losses
from datasets import Dataset
model = CrossEncoder("microsoft/mpnet-base", num_labels=2)
train_dataset = Dataset.from_dict({
"sentence1": ["How can I be a good geologist?", "What is the capital of France?"],
"sentence2": ["What should I do to be a great geologist?", "What is the capital of Germany?"],
"label": [1, 0], # 1: duplicate, 0: not duplicate
})
loss = losses.CrossEntropyLoss(model)
trainer = CrossEncoderTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.ce_loss = nn.CrossEntropyLoss(**kwargs)
if not isinstance(self.model, CrossEncoder):
raise ValueError(
f"{self.__class__.__name__} expects a model of type CrossEncoder, "
f"but got a model of type {type(self.model)}."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
logits = self.activation_fct(logits)
loss = self.ce_loss(logits, labels)
return loss
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
# TODO: Consider the naming of this class
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.ce_loss = nn.CrossEntropyLoss(**kwargs)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
logits = self.activation_fct(logits)
loss = self.ce_loss(logits, labels)
return loss
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .setup_env import set_multi_processing
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint', 'ManagerMeta', 'ManagerMixin',
'set_multi_processing'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint', 'ManagerMeta', 'ManagerMixin'
]
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import MongoDBAtlasDocumentIndex
from docarray.typing import NdArray
from . import NestedDoc, SimpleDoc, SimpleSchema, assert_when_ready
def test_find_simple_schema(simple_index_with_docs, n_dim): # noqa: F811
simple_index, random_simple_documents = simple_index_with_docs # noqa: F811
query = np.ones(n_dim)
# Insert one doc that identically matches query's embedding
expected_matching_document = SimpleSchema(embedding=query, text="other", number=10)
simple_index.index(expected_matching_document)
def pred():
docs, scores = simple_index.find(query, search_field='embedding', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].embedding, expected_matching_document.embedding)
assert_when_ready(pred)
def test_find_empty_index(simple_index, n_dim): # noqa: F811
query = np.random.rand(n_dim)
def pred():
docs, scores = simple_index.find(query, search_field='embedding', limit=5)
assert len(docs) == 0
assert len(scores) == 0
assert_when_ready(pred)
def test_find_limit_larger_than_index(simple_index_with_docs, n_dim): # noqa: F811
simple_index, random_simple_documents = simple_index_with_docs # noqa: F811
query = np.ones(n_dim)
new_doc = SimpleSchema(embedding=query, text="other", number=10)
simple_index.index(new_doc)
def pred():
docs, scores = simple_index.find(query, search_field='embedding', limit=20)
assert len(docs) == 11
assert len(scores) == 11
assert_when_ready(pred)
def test_find_flat_schema(mongodb_index_config, n_dim): # noqa: F811
class FlatSchema(BaseDoc):
embedding1: NdArray = Field(dim=n_dim, index_name="vector_index_1")
        # dim and n_dim are intentionally set to different values to check the correct handling of n_dim
embedding2: NdArray[50] = Field(dim=n_dim, index_name="vector_index_2")
index = MongoDBAtlasDocumentIndex[FlatSchema](**mongodb_index_config)
index._collection.delete_many({})
index_docs = [
FlatSchema(embedding1=np.random.rand(n_dim), embedding2=np.random.rand(50))
for _ in range(10)
]
index_docs.append(FlatSchema(embedding1=np.zeros(n_dim), embedding2=np.ones(50)))
index_docs.append(FlatSchema(embedding1=np.ones(n_dim), embedding2=np.zeros(50)))
index.index(index_docs)
def pred1():
# find on embedding1
query = np.ones(n_dim)
docs, scores = index.find(query, search_field='embedding1', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].embedding1, index_docs[-1].embedding1)
assert np.allclose(docs[0].embedding2, index_docs[-1].embedding2)
assert_when_ready(pred1)
def pred2():
# find on embedding2
query = np.ones(50)
docs, scores = index.find(query, search_field='embedding2', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].embedding1, index_docs[-2].embedding1)
assert np.allclose(docs[0].embedding2, index_docs[-2].embedding2)
assert_when_ready(pred2)
def test_find_batches(simple_index_with_docs): # noqa: F811
simple_index, docs = simple_index_with_docs # noqa: F811
queries = np.array([np.random.rand(10) for _ in range(3)])
def pred():
resp = simple_index.find_batched(
queries=queries, search_field='embedding', limit=10
)
docs_responses = resp.documents
assert len(docs_responses) == 3
for matches in docs_responses:
assert len(matches) == 10
assert_when_ready(pred)
def test_find_nested_schema(nested_index_with_docs, n_dim): # noqa: F811
db, base_docs = nested_index_with_docs
query = NestedDoc(d=SimpleDoc(embedding=np.ones(n_dim)), embedding=np.ones(n_dim))
# find on root level
def pred():
docs, scores = db.find(query, search_field='embedding', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].embedding, base_docs[-1].embedding)
# find on first nesting level
docs, scores = db.find(query, search_field='d__embedding', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].d.embedding, base_docs[-2].d.embedding)
assert_when_ready(pred)
def test_find_schema_without_index(mongodb_index_config, n_dim): # noqa: F811
class Schema(BaseDoc):
vec: NdArray = Field(dim=n_dim)
index = MongoDBAtlasDocumentIndex[Schema](**mongodb_index_config)
query = np.ones(n_dim)
with pytest.raises(ValueError):
index.find(query, search_field='vec', limit=2)
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import MongoDBAtlasDocumentIndex
from docarray.typing import NdArray
from . import NestedDoc, SimpleDoc, SimpleSchema, assert_when_ready
N_DIM = 10
def test_find_simple_schema(simple_index_with_docs): # noqa: F811
simple_index, random_simple_documents = simple_index_with_docs # noqa: F811
query = np.ones(N_DIM)
# Insert one doc that identically matches query's embedding
expected_matching_document = SimpleSchema(embedding=query, text="other", number=10)
simple_index.index(expected_matching_document)
def pred():
docs, scores = simple_index.find(query, search_field='embedding', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].embedding, expected_matching_document.embedding)
assert_when_ready(pred)
def test_find_empty_index(simple_index): # noqa: F811
query = np.random.rand(N_DIM)
def pred():
docs, scores = simple_index.find(query, search_field='embedding', limit=5)
assert len(docs) == 0
assert len(scores) == 0
assert_when_ready(pred)
def test_find_limit_larger_than_index(simple_index_with_docs): # noqa: F811
simple_index, random_simple_documents = simple_index_with_docs # noqa: F811
query = np.ones(N_DIM)
new_doc = SimpleSchema(embedding=query, text="other", number=10)
simple_index.index(new_doc)
def pred():
docs, scores = simple_index.find(query, search_field='embedding', limit=20)
assert len(docs) == 11
assert len(scores) == 11
assert_when_ready(pred)
def test_find_flat_schema(mongodb_index_config): # noqa: F811
class FlatSchema(BaseDoc):
embedding1: NdArray = Field(dim=N_DIM, index_name="vector_index_1")
        # dim and N_DIM are intentionally set to different values to check the correct handling of n_dim
embedding2: NdArray[50] = Field(dim=N_DIM, index_name="vector_index_2")
index = MongoDBAtlasDocumentIndex[FlatSchema](**mongodb_index_config)
index._doc_collection.delete_many({})
index_docs = [
FlatSchema(embedding1=np.random.rand(N_DIM), embedding2=np.random.rand(50))
for _ in range(10)
]
index_docs.append(FlatSchema(embedding1=np.zeros(N_DIM), embedding2=np.ones(50)))
index_docs.append(FlatSchema(embedding1=np.ones(N_DIM), embedding2=np.zeros(50)))
index.index(index_docs)
def pred1():
# find on embedding1
query = np.ones(N_DIM)
docs, scores = index.find(query, search_field='embedding1', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].embedding1, index_docs[-1].embedding1)
assert np.allclose(docs[0].embedding2, index_docs[-1].embedding2)
assert_when_ready(pred1)
def pred2():
# find on embedding2
query = np.ones(50)
docs, scores = index.find(query, search_field='embedding2', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].embedding1, index_docs[-2].embedding1)
assert np.allclose(docs[0].embedding2, index_docs[-2].embedding2)
assert_when_ready(pred2)
def test_find_batches(simple_index_with_docs): # noqa: F811
simple_index, docs = simple_index_with_docs # noqa: F811
queries = np.array([np.random.rand(10) for _ in range(3)])
def pred():
resp = simple_index.find_batched(
queries=queries, search_field='embedding', limit=10
)
docs_responses = resp.documents
assert len(docs_responses) == 3
for matches in docs_responses:
assert len(matches) == 10
assert_when_ready(pred)
def test_find_nested_schema(nested_index_with_docs): # noqa: F811
db, base_docs = nested_index_with_docs
query = NestedDoc(d=SimpleDoc(embedding=np.ones(N_DIM)), embedding=np.ones(N_DIM))
# find on root level
def pred():
docs, scores = db.find(query, search_field='embedding', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].embedding, base_docs[-1].embedding)
# find on first nesting level
docs, scores = db.find(query, search_field='d__embedding', limit=5)
assert len(docs) == 5
assert len(scores) == 5
assert np.allclose(docs[0].d.embedding, base_docs[-2].d.embedding)
assert_when_ready(pred)
def test_find_schema_without_index(mongodb_index_config): # noqa: F811
class Schema(BaseDoc):
vec: NdArray = Field(dim=N_DIM)
index = MongoDBAtlasDocumentIndex[Schema](**mongodb_index_config)
query = np.ones(N_DIM)
with pytest.raises(ValueError):
index.find(query, search_field='vec', limit=2)
|
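# Both test variants above poll through an `assert_when_ready` helper imported
# from the test package's __init__, whose body is not shown here. Because
# Atlas Search indexes are eventually consistent, a minimal sketch of such a
# helper is a retry loop; the timeout and interval values below are
# assumptions, not the project's actual defaults.
import time


def assert_when_ready(predicate, timeout: float = 30.0, interval: float = 1.0) -> None:
    """Re-run a callable full of assertions until it passes or `timeout` expires."""
    deadline = time.monotonic() + timeout
    while True:
        try:
            predicate()
            return
        except AssertionError:
            if time.monotonic() >= deadline:
                raise  # give up and surface the last assertion failure
            time.sleep(interval)
|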
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        The margin is an important hyperparameter and needs to be tuned for the task at hand.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> Dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        The margin is an important hyperparameter and needs to be tuned for the task at hand.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> Dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
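# A standalone numeric check of the formula in the docstring above,
# ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``,
# using plain tensors; the coordinates are illustrative only.
import torch
import torch.nn.functional as F

anchor = torch.tensor([[0.0, 0.0]])
positive = torch.tensor([[1.0, 0.0]])  # Euclidean distance 1 from the anchor
negative = torch.tensor([[4.0, 0.0]])  # Euclidean distance 4 from the anchor
margin = 5.0

distance_pos = F.pairwise_distance(anchor, positive, p=2)  # -> 1.0
distance_neg = F.pairwise_distance(anchor, negative, p=2)  # -> 4.0
loss = F.relu(distance_pos - distance_neg + margin)  # max(1 - 4 + 5, 0) = 2.0
print(loss.mean())  # tensor(2.)
|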
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import (
AmazonTextractPDFLoader,
MathpixPDFLoader,
OnlinePDFLoader,
PagedPDFSplitter,
PDFMinerLoader,
PDFMinerPDFasHTMLLoader,
PDFPlumberLoader,
PyMuPDFLoader,
PyPDFDirectoryLoader,
PyPDFium2Loader,
UnstructuredPDFLoader,
)
from langchain_community.document_loaders.pdf import (
BasePDFLoader,
DocumentIntelligenceLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UnstructuredPDFLoader": "langchain_community.document_loaders",
"BasePDFLoader": "langchain_community.document_loaders.pdf",
"OnlinePDFLoader": "langchain_community.document_loaders",
"PagedPDFSplitter": "langchain_community.document_loaders",
"PyPDFium2Loader": "langchain_community.document_loaders",
"PyPDFDirectoryLoader": "langchain_community.document_loaders",
"PDFMinerLoader": "langchain_community.document_loaders",
"PDFMinerPDFasHTMLLoader": "langchain_community.document_loaders",
"PyMuPDFLoader": "langchain_community.document_loaders",
"MathpixPDFLoader": "langchain_community.document_loaders",
"PDFPlumberLoader": "langchain_community.document_loaders",
"AmazonTextractPDFLoader": "langchain_community.document_loaders",
"DocumentIntelligenceLoader": "langchain_community.document_loaders.pdf",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AmazonTextractPDFLoader",
"BasePDFLoader",
"DocumentIntelligenceLoader",
"MathpixPDFLoader",
"OnlinePDFLoader",
"PDFMinerLoader",
"PDFMinerPDFasHTMLLoader",
"PDFPlumberLoader",
"PagedPDFSplitter",
"PyMuPDFLoader",
"PyPDFDirectoryLoader",
"PyPDFium2Loader",
"UnstructuredPDFLoader",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import (
AmazonTextractPDFLoader,
MathpixPDFLoader,
OnlinePDFLoader,
PagedPDFSplitter,
PDFMinerLoader,
PDFMinerPDFasHTMLLoader,
PDFPlumberLoader,
PyMuPDFLoader,
PyPDFDirectoryLoader,
PyPDFium2Loader,
UnstructuredPDFLoader,
)
from langchain_community.document_loaders.pdf import (
BasePDFLoader,
DocumentIntelligenceLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UnstructuredPDFLoader": "langchain_community.document_loaders",
"BasePDFLoader": "langchain_community.document_loaders.pdf",
"OnlinePDFLoader": "langchain_community.document_loaders",
"PagedPDFSplitter": "langchain_community.document_loaders",
"PyPDFium2Loader": "langchain_community.document_loaders",
"PyPDFDirectoryLoader": "langchain_community.document_loaders",
"PDFMinerLoader": "langchain_community.document_loaders",
"PDFMinerPDFasHTMLLoader": "langchain_community.document_loaders",
"PyMuPDFLoader": "langchain_community.document_loaders",
"MathpixPDFLoader": "langchain_community.document_loaders",
"PDFPlumberLoader": "langchain_community.document_loaders",
"AmazonTextractPDFLoader": "langchain_community.document_loaders",
"DocumentIntelligenceLoader": "langchain_community.document_loaders.pdf",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UnstructuredPDFLoader",
"BasePDFLoader",
"OnlinePDFLoader",
"PagedPDFSplitter",
"PyPDFium2Loader",
"PyPDFDirectoryLoader",
"PDFMinerLoader",
"PDFMinerPDFasHTMLLoader",
"PyMuPDFLoader",
"MathpixPDFLoader",
"PDFPlumberLoader",
"AmazonTextractPDFLoader",
"DocumentIntelligenceLoader",
]
|
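# Both variants above delegate to `create_importer`, whose implementation is
# not shown. A minimal sketch of the PEP 562 module-level `__getattr__`
# pattern it builds on; the warning text and import mechanics here are
# assumptions, not langchain's actual code.
import importlib
import warnings
from typing import Any

_DEPRECATED_LOOKUP = {"UnstructuredPDFLoader": "langchain_community.document_loaders"}


def __getattr__(name: str) -> Any:
    # PEP 562: invoked only when `name` is not found in the module namespace.
    if name in _DEPRECATED_LOOKUP:
        new_module = _DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {new_module} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(new_module), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|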
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2._utils
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
from torchvision import tv_tensors
from torchvision.transforms.v2._utils import has_all, has_any
from torchvision.transforms.v2.functional import to_pil_image
IMAGE = make_image(DEFAULT_SIZE, color_space="RGB")
BOUNDING_BOX = make_bounding_boxes(DEFAULT_SIZE, format=tv_tensors.BoundingBoxFormat.XYXY)
MASK = make_detection_mask(DEFAULT_SIZE)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True),
((MASK,), (tv_tensors.Image, tv_tensors.BoundingBoxes), False),
((BOUNDING_BOX,), (tv_tensors.Image, tv_tensors.Mask), False),
((IMAGE,), (tv_tensors.BoundingBoxes, tv_tensors.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask),
True,
),
((), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, tv_tensors.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (tv_tensors.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor), True),
(
(torch.Tensor(IMAGE),),
(tv_tensors.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor),
True,
),
(
(to_pil_image(IMAGE),),
(tv_tensors.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (tv_tensors.BoundingBoxes, tv_tensors.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask),
True,
),
((BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes), False),
((BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.Mask), False),
((IMAGE, MASK), (tv_tensors.BoundingBoxes, tv_tensors.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask),
True,
),
((BOUNDING_BOX, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), False),
((IMAGE, MASK), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), False),
((IMAGE, BOUNDING_BOX), (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (tv_tensors.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
import PIL.Image
import pytest
import torch
import torchvision.transforms.v2._utils
from common_utils import DEFAULT_SIZE, make_bounding_boxes, make_detection_mask, make_image
from torchvision import datapoints
from torchvision.transforms.v2._utils import has_all, has_any
from torchvision.transforms.v2.functional import to_pil_image
IMAGE = make_image(DEFAULT_SIZE, color_space="RGB")
BOUNDING_BOX = make_bounding_boxes(DEFAULT_SIZE, format=datapoints.BoundingBoxFormat.XYXY)
MASK = make_detection_mask(DEFAULT_SIZE)
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes, datapoints.Mask), True),
((MASK,), (datapoints.Image, datapoints.BoundingBoxes), False),
((BOUNDING_BOX,), (datapoints.Image, datapoints.Mask), False),
((IMAGE,), (datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda obj: isinstance(obj, datapoints.Image),), True),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
((IMAGE,), (datapoints.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor), True),
(
(torch.Tensor(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor),
True,
),
(
(to_pil_image(IMAGE),),
(datapoints.Image, PIL.Image.Image, torchvision.transforms.v2._utils.is_pure_tensor),
True,
),
],
)
def test_has_any(sample, types, expected):
assert has_any(sample, *types) is expected
@pytest.mark.parametrize(
("sample", "types", "expected"),
[
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Mask,), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), True),
((IMAGE, BOUNDING_BOX, MASK), (datapoints.BoundingBoxes, datapoints.Mask), True),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes), False),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask),
True,
),
((BOUNDING_BOX, MASK), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, MASK), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
((IMAGE, BOUNDING_BOX), (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask), False),
(
(IMAGE, BOUNDING_BOX, MASK),
(lambda obj: isinstance(obj, (datapoints.Image, datapoints.BoundingBoxes, datapoints.Mask)),),
True,
),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: False,), False),
((IMAGE, BOUNDING_BOX, MASK), (lambda _: True,), True),
],
)
def test_has_all(sample, types, expected):
assert has_all(sample, *types) is expected
|
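# The semantics exercised by the two parametrizations above: `has_any` passes
# when at least one object in the sample satisfies some entry in `types`, and
# `has_all` when every entry in `types` is satisfied by some object; an entry
# may be a class or an arbitrary predicate. A flat-tuple sketch (torchvision's
# real helpers also flatten nested samples, which this sketch omits):
def _matches(obj, type_or_check) -> bool:
    # An entry is either a class (isinstance check) or a callable predicate.
    if isinstance(type_or_check, type):
        return isinstance(obj, type_or_check)
    return type_or_check(obj)


def has_any_sketch(sample, *types_or_checks) -> bool:
    return any(_matches(obj, t) for obj in sample for t in types_or_checks)


def has_all_sketch(sample, *types_or_checks) -> bool:
    return all(any(_matches(obj, t) for obj in sample) for t in types_or_checks)
|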
"""Test ChatDeepSeek chat model."""
from __future__ import annotations
from typing import Optional
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_deepseek.chat_models import ChatDeepSeek
class TestChatDeepSeek(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatDeepSeek]:
return ChatDeepSeek
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "deepseek-chat",
"temperature": 0,
}
@property
def supports_json_mode(self) -> bool:
"""(bool) whether the chat model supports JSON mode."""
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_tool_message_histories_list_content(
self,
model: BaseChatModel,
my_adder_tool: BaseTool,
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatDeepSeek(model="deepseek-reasoner")
response = chat_model.invoke("What is 3^3?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
raise ValueError
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content_streaming() -> None:
chat_model = ChatDeepSeek(model="deepseek-reasoner")
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is 3^3?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
"""Test ChatDeepSeek chat model."""
from typing import Optional
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_deepseek.chat_models import ChatDeepSeek
class TestChatDeepSeek(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatDeepSeek]:
return ChatDeepSeek
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "deepseek-chat",
"temperature": 0,
}
@property
def supports_json_mode(self) -> bool:
"""(bool) whether the chat model supports JSON mode."""
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatDeepSeek(model="deepseek-reasoner")
response = chat_model.invoke("What is 3^3?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
raise ValueError()
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content_streaming() -> None:
chat_model = ChatDeepSeek(model="deepseek-reasoner")
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is 3^3?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
from typing import TYPE_CHECKING
from backend.integrations.oauth.todoist import TodoistOAuthHandler
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .linear import LinearOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAuthHandler
# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GitHubOAuthHandler,
GoogleOAuthHandler,
NotionOAuthHandler,
TwitterOAuthHandler,
LinearOAuthHandler,
TodoistOAuthHandler,
]
}
# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
|
from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .linear import LinearOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAuthHandler
# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GitHubOAuthHandler,
GoogleOAuthHandler,
NotionOAuthHandler,
TwitterOAuthHandler,
LinearOAuthHandler,
]
}
# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
|
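# A hypothetical consumer of the `HANDLERS_BY_NAME` registry above: look up
# the handler class by provider name and instantiate it. The constructor
# arguments are assumptions, not the project's actual handler signature.
def get_oauth_handler(
    provider_name: "ProviderName", client_id: str, client_secret: str
) -> "BaseOAuthHandler":
    handler_class = HANDLERS_BY_NAME.get(provider_name)
    if handler_class is None:
        raise KeyError(f"No OAuth handler registered for provider {provider_name!r}")
    return handler_class(client_id=client_id, client_secret=client_secret)
|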
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
print(dev_evaluator.primary_metric)
print(results[dev_evaluator.primary_metric])
|
_base_ = './fovea_r50_fpn_4xb4-1x_coco.py'
model = dict(
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
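# Follow-up to the record above: `cos_scores` there has shape (1, 3), one
# cosine similarity per caption. A short sketch for picking the best-matching
# caption; it reuses `cos_scores` and the caption list from that snippet.
import torch

captions = ["Two dogs in the snow", "A cat on a table", "A picture of London at night"]
best_caption = captions[int(torch.argmax(cos_scores, dim=1))]
print(best_caption)  # expected: "Two dogs in the snow"
|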
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a file containing point cloud information.
    Can be a remote (web) URL or a local file path.
"""
def load(
self: T,
samples: int,
multiple_geometries: bool = False,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'PointsAndColors':
"""
Load the data from the url into an NdArray containing point cloud information.
---
```python
import numpy as np
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDoc):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
# point_cloud = doc.point_cloud_url.load(samples=100)
# assert isinstance(point_cloud, np.ndarray)
# assert point_cloud.shape == (100, 3)
```
---
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:param skip_materials: Skip materials if True, else load.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: np.ndarray representing the point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if not trimesh_args:
trimesh_args = {}
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(
force='scene', skip_materials=skip_materials, **trimesh_args
)
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh', **trimesh_args)
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(
self,
samples: int = 10000,
) -> None:
"""
Plot point cloud from url.
First, it loads the point cloud into a :class:`PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
.. code-block:: python
import numpy as np
from docarray import BaseDoc
from docarray.documents import PointCloud3D
pc = PointCloud3D("toydata/tetrahedron.obj")
# option 1
pc.url.display()
# option 2 (equivalent)
pc.url.load(samples=10000).display()
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples, skip_materials=False).display()
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
    Can be a remote (web) URL or a local file path.
"""
def load(
self: T,
samples: int,
multiple_geometries: bool = False,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'PointsAndColors':
"""
Load the data from the url into an NdArray containing point cloud information.
---
```python
import numpy as np
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDoc):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
# point_cloud = doc.point_cloud_url.load(samples=100)
# assert isinstance(point_cloud, np.ndarray)
# assert point_cloud.shape == (100, 3)
```
---
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:param skip_materials: Skip materials if True, else load.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: np.ndarray representing the point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if not trimesh_args:
trimesh_args = {}
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(
force='scene', skip_materials=skip_materials, **trimesh_args
)
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh', **trimesh_args)
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(
self,
samples: int = 10000,
) -> None:
"""
Plot point cloud from url.
First, it loads the point cloud into a :class:`PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
.. code-block:: python
import numpy as np
from docarray import BaseDoc
from docarray.documents import PointCloud3D
pc = PointCloud3D("toydata/tetrahedron.obj")
# option 1
pc.url.display()
# option 2 (equivalent)
pc.url.load(samples=10000).display()
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples, skip_materials=False).display()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GoogleSerperResults, GoogleSerperRun
"""Google Serper API Toolkit."""
"""Tool for the Serer.dev Google Search API."""
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleSerperRun": "langchain_community.tools",
"GoogleSerperResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleSerperResults",
"GoogleSerperRun",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import GoogleSerperResults, GoogleSerperRun
"""Google Serper API Toolkit."""
"""Tool for the Serer.dev Google Search API."""
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GoogleSerperRun": "langchain_community.tools",
"GoogleSerperResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GoogleSerperRun",
"GoogleSerperResults",
]
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
assert len(self.source_sentences) == len(self.target_sentences)
if name:
name = "_" + name
self.csv_file = "translation_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "src2trg", "trg2src"]
self.primary_metric = "mean_accuracy"
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
"""Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import ChatOpenAI
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "stream_usage": True}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="o1-mini", stream_usage=True, temperature=1)
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
@property
def supports_pdf_inputs(self) -> bool:
# OpenAI requires a filename for PDF inputs
# For now, we test with filename in OpenAI-specific tests
return False
def test_openai_pdf_inputs(self, model: BaseChatModel) -> None:
"""Test that the model can process PDF inputs."""
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")
message = HumanMessage(
[
{"type": "text", "text": "Summarize this document:"},
{
"type": "file",
"source_type": "base64",
"mime_type": "application/pdf",
"data": pdf_data,
"filename": "my-pdf", # OpenAI requires a filename
},
]
)
_ = model.invoke([message])
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
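# accumulate streamed chunks into a single message; message chunks support '+'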
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
@pytest.mark.skip() # Test either finishes in 5 seconds or 5 minutes.
def test_audio_model() -> None:
class AudioModelTests(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatOpenAI]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"model": "gpt-4o-audio-preview",
"temperature": 0,
"model_kwargs": {
"modalities": ["text", "audio"],
"audio": {"voice": "alloy", "format": "wav"},
},
}
@property
def supports_audio_inputs(self) -> bool:
return True
test_instance = AudioModelTests()
model = test_instance.chat_model_class(**test_instance.chat_model_params)
test_instance.test_audio_inputs(model)
|
"""Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import ChatOpenAI
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "stream_usage": True}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="o1-mini", stream_usage=True, temperature=1)
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
@property
def supports_pdf_inputs(self) -> bool:
# OpenAI requires a filename for PDF inputs
# For now, we test with filename in OpenAI-specific tests
return False
def test_openai_pdf_inputs(self, model: BaseChatModel) -> None:
"""Test that the model can process PDF inputs."""
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")
message = HumanMessage(
[
{"type": "text", "text": "Summarize this document:"},
{
"type": "file",
"source_type": "base64",
"mime_type": "application/pdf",
"data": pdf_data,
"filename": "my-pdf", # OpenAI requires a filename
},
]
)
_ = model.invoke([message])
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
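# accumulate streamed chunks into a single message; message chunks support '+'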
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
samples: int,
multiple_geometries: bool = False,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'PointsAndColors':
"""
Load the data from the url into a `PointsAndColors` object containing point cloud information.
---
```python
import numpy as np
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDoc):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
# point_cloud = doc.point_cloud_url.load(samples=100)
# assert isinstance(point_cloud, np.ndarray)
# assert point_cloud.shape == (100, 3)
```
---
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:param skip_materials: Skip materials if True, else load.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: a `PointsAndColors` object representing the loaded point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if not trimesh_args:
trimesh_args = {}
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(
force='scene', skip_materials=skip_materials, **trimesh_args
)
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh', **trimesh_args)
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(
self,
samples: int = 10000,
) -> None:
"""
Plot point cloud from url.
First, it loads the point cloud into a :class:`PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
.. code-block:: python
import numpy as np
from docarray import BaseDoc
from docarray.documents import PointCloud3D
pc = PointCloud3D(url="toydata/tetrahedron.obj")
# option 1
pc.url.display()
# option 2 (equivalent)
pc.url.load(samples=10000).display()
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples, skip_materials=False).display()
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
samples: int,
multiple_geometries: bool = False,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'PointsAndColors':
"""
Load the data from the url into a `PointsAndColors` object containing point cloud information.
---
```python
import numpy as np
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDoc):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
# point_cloud = doc.point_cloud_url.load(samples=100)
# assert isinstance(point_cloud, np.ndarray)
# assert point_cloud.shape == (100, 3)
```
---
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:param skip_materials: Skip materials if True, else load.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: a `PointsAndColors` object representing the loaded point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if not trimesh_args:
trimesh_args = {}
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(
force='scene', skip_materials=skip_materials, **trimesh_args
)
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh', **trimesh_args)
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(
self,
samples: int = 10000,
) -> None:
"""
Plot point cloud from url.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
First, it loads the point cloud into a :class:`PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
.. code-block:: python
import numpy as np
from docarray import BaseDoc
from docarray.documents import PointCloud3D
pc = PointCloud3D(url="toydata/tetrahedron.obj")
# option 1
pc.url.display()
# option 2 (equivalent)
pc.url.load(samples=10000).display()
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples, skip_materials=False).display()
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
from tests.helper import _generate_pod_args
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
@pytest.mark.parametrize(
'uses', ['jinaai://jina-ai/DummyHubExecutor']
)
def test_cli_hub(uses):
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', uses])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
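# ping a reachable executor (expects exit code 0), then a bad address
# with a short timeout (expects exit code 1)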
a1 = _generate_pod_args()
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port[0]}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port[0]}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
@pytest.mark.parametrize(
'cmd',
[
['jina', 'ping', 'flow', '127.0.0.1:8080'],
['jina', 'help', 'port'],
['jina', 'hub'],
],
)
def test_logo_silence(cmd):
from jina.constants import __resources_path__
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
s = subprocess.run(
cmd,
stdout=subprocess.PIPE,
)
assert logo_str not in s.stdout.decode()
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
from tests.helper import _generate_pod_args
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
@pytest.mark.parametrize(
'uses', ['jinaai://jina-ai/DummyHubExecutor']
)
def test_cli_hub(uses):
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', uses])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
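# ping a reachable executor (expects exit code 0), then a bad address
# with a short timeout (expects exit code 1)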
a1 = _generate_pod_args()
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
@pytest.mark.parametrize(
'cmd',
[
['jina', 'ping', 'flow', '127.0.0.1:8080'],
['jina', 'help', 'port'],
['jina', 'hub'],
],
)
def test_logo_silence(cmd):
from jina.constants import __resources_path__
with open(os.path.join(__resources_path__, 'jina.logo')) as fp:
logo_str = fp.read()
s = subprocess.run(
cmd,
stdout=subprocess.PIPE,
)
assert logo_str not in s.stdout.decode()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .profiler_hook import ProfilerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
from .test_time_aug_hook import PrepareTTAHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook', 'ProfilerHook',
'PrepareTTAHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .param_scheduler_hook import ParamSchedulerHook
from .profiler_hook import ProfilerHook
from .runtime_info_hook import RuntimeInfoHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook', 'LoggerHook',
'NaiveVisualizationHook', 'EMAHook', 'RuntimeInfoHook', 'ProfilerHook'
]
|
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SequentialEvaluator(SentenceEvaluator):
"""
This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
the data is passed sequentially to all sub-evaluators.
All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
"""
Initializes a SequentialEvaluator object.
Args:
evaluators (Iterable[SentenceEvaluator]): A collection of SentenceEvaluator objects.
main_score_function (function, optional): A function that takes a list of scores and returns the main score.
Defaults to selecting the last score in the list.
Example:
::
evaluator1 = BinaryClassificationEvaluator(...)
evaluator2 = InformationRetrievalEvaluator(...)
evaluator3 = MSEEvaluator(...)
seq_evaluator = SequentialEvaluator([evaluator1, evaluator2, evaluator3])
"""
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluator, "primary_metric"):
scores.append(evaluation[evaluator.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
|
from typing import Any
import pytest
from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from vcr import VCR # type: ignore[import-untyped]
_EXTRA_HEADERS = [
("openai-organization", "PLACEHOLDER"),
("user-agent", "PLACEHOLDER"),
("x-openai-client-user-agent", "PLACEHOLDER"),
]
def remove_request_headers(request: Any) -> Any:
"""Remove sensitive headers from the request."""
for k in request.headers:
request.headers[k] = "**REDACTED**"
request.uri = "**REDACTED**"
return request
def remove_response_headers(response: dict) -> dict:
"""Remove sensitive headers from the response."""
for k in response["headers"]:
response["headers"][k] = "**REDACTED**"
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""Extend the default configuration coming from langchain_tests."""
config = _base_vcr_config.copy()
config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS)
config["before_record_request"] = remove_request_headers
config["before_record_response"] = remove_response_headers
config["serializer"] = "yaml.gz"
config["path_transformer"] = VCR.ensure_suffix(".yaml.gz")
return config
def pytest_recording_configure(config: dict, vcr: VCR) -> None:
vcr.register_persister(CustomPersister())
vcr.register_serializer("yaml.gz", CustomSerializer())
|
from typing import Any
import pytest
from langchain_tests.conftest import YamlGzipSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from vcr import VCR # type: ignore[import-untyped]
_EXTRA_HEADERS = [
("openai-organization", "PLACEHOLDER"),
("user-agent", "PLACEHOLDER"),
("x-openai-client-user-agent", "PLACEHOLDER"),
]
def remove_request_headers(request: Any) -> Any:
for k in request.headers:
request.headers[k] = "**REDACTED**"
request.uri = "**REDACTED**"
return request
def remove_response_headers(response: dict) -> dict:
for k in response["headers"]:
response["headers"][k] = "**REDACTED**"
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""
Extend the default configuration coming from langchain_tests.
"""
config = _base_vcr_config.copy()
config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS)
config["before_record_request"] = remove_request_headers
config["before_record_response"] = remove_response_headers
config["serializer"] = "yaml.gz"
config["path_transformer"] = VCR.ensure_suffix(".yaml.gz")
return config
@pytest.fixture
def vcr(vcr_config: dict) -> VCR:
"""Override the default vcr fixture to include custom serializers"""
my_vcr = VCR(**vcr_config)
my_vcr.register_serializer("yaml.gz", YamlGzipSerializer)
return my_vcr
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseEvaluator(metaclass=ABCMeta):
"""Base class for an evaluator.
The evaluator first processes each batch of data_samples and
predictions, and appends the processed results into the results list.
Then it collects all results together from all ranks if distributed
training is used. Finally, it computes the metrics of the entire dataset.
A subclass of :class:`BaseEvaluator` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in evaluator class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[Tuple[Any, BaseDataSample]],
predictions: Sequence[BaseDataSample]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, BaseDataSample]]): A batch of data
from the dataloader.
predictions (Sequence[BaseDataSample]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in the `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseEvaluator(metaclass=ABCMeta):
"""Base class for an evaluator.
The evaluator first processes each batch of data_samples and
predictions, and appends the processed results into the results list.
Then it collects all results together from all ranks if distributed
training is used. Finally, it computes the metrics of the entire dataset.
A subclass of :class:`BaseEvaluator` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in evaluator class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[Tuple[Any, BaseDataSample]],
predictions: Sequence[BaseDataSample]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, BaseDataSample]]): A batch of data
from the dataloader.
predictions (Sequence[BaseDataSample]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in the `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
"""A unit test meant to catch accidental introduction of non-optional dependencies."""
from collections.abc import Mapping
from pathlib import Path
from typing import Any
import pytest
import toml
from packaging.requirements import Requirement
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture()
def uv_conf() -> dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)
def test_required_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
# Get the dependencies from the [project.dependencies] section
dependencies = uv_conf["project"]["dependencies"]
required_dependencies = set(Requirement(dep).name for dep in dependencies)
assert sorted(required_dependencies) == sorted(
[
"PyYAML",
"SQLAlchemy",
"async-timeout",
"langchain-core",
"langchain-text-splitters",
"langsmith",
"pydantic",
"requests",
]
)
def test_test_group_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
dependencies = uv_conf["dependency-groups"]["test"]
test_group_deps = set(Requirement(dep).name for dep in dependencies)
assert sorted(test_group_deps) == sorted(
[
"duckdb-engine",
"freezegun",
"langchain-core",
"langchain-tests",
"langchain-text-splitters",
"langchain-openai",
"lark",
"packaging",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"pytest-xdist",
"blockbuster",
"responses",
"syrupy",
"toml",
"requests-mock",
# TODO: temporary hack since cffi 1.17.1 doesn't work with py 3.9.
"cffi",
"numpy",
]
)
|
"""A unit test meant to catch accidental introduction of non-optional dependencies."""
from pathlib import Path
from typing import Any, Dict, Mapping
import pytest
import toml
from packaging.requirements import Requirement
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture()
def uv_conf() -> Dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)
def test_required_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
# Get the dependencies from the [project.dependencies] section
dependencies = uv_conf["project"]["dependencies"]
required_dependencies = set(Requirement(dep).name for dep in dependencies)
assert sorted(required_dependencies) == sorted(
[
"PyYAML",
"SQLAlchemy",
"async-timeout",
"langchain-core",
"langchain-text-splitters",
"langsmith",
"pydantic",
"requests",
]
)
def test_test_group_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
dependencies = uv_conf["dependency-groups"]["test"]
test_group_deps = set(Requirement(dep).name for dep in dependencies)
assert sorted(test_group_deps) == sorted(
[
"duckdb-engine",
"freezegun",
"langchain-core",
"langchain-tests",
"langchain-text-splitters",
"langchain-openai",
"lark",
"packaging",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"pytest-xdist",
"blockbuster",
"responses",
"syrupy",
"toml",
"requests-mock",
# TODO: temporary hack since cffi 1.17.1 doesn't work with py 3.9.
"cffi",
"numpy",
]
)
|
from abc import ABC
from docarray.array.storage.sqlite.backend import BackendMixin, SqliteConfig
from docarray.array.storage.sqlite.getsetdel import GetSetDelMixin
from docarray.array.storage.sqlite.seqlike import SequenceLikeMixin
from docarray.array.storage.memory.find import (
FindMixin,
) # temporary delegate to in-memory find API
__all__ = ['StorageMixins', 'SqliteConfig']
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
|
from abc import ABC
from .backend import BackendMixin, SqliteConfig
from .getsetdel import GetSetDelMixin
from .seqlike import SequenceLikeMixin
from ..memory.find import FindMixin # temporary delegate to in-memory find API
__all__ = ['StorageMixins', 'SqliteConfig']
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredOrgModeLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UnstructuredOrgModeLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UnstructuredOrgModeLoader",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import UnstructuredOrgModeLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UnstructuredOrgModeLoader": "langchain_community.document_loaders"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"UnstructuredOrgModeLoader",
]
|
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
input_size=input_size,
strides=[8, 16, 32, 64, 128, 256, 512],
basesize_ratio_range=(0.15, 0.9),
ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]))))
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
input_size=input_size,
strides=[8, 16, 32, 64, 128, 256, 512],
basesize_ratio_range=(0.15, 0.9),
ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]))))
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.typing.proto_register import _PROTO_TYPE_NAME_TO_CLASS
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type_dict = _PROTO_TYPE_NAME_TO_CLASS
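# maps registered proto type names (e.g. 'ndarray') to their docarray type classes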
content_key = value.WhichOneof('content')
content_type = (
value.type if value.WhichOneof('docarray_type') is not None else None
)
if torch_imported:
from docarray.typing import TorchEmbedding
from docarray.typing.tensor.torch_tensor import TorchTensor
content_type_dict['torch'] = TorchTensor
content_type_dict['torch_embedding'] = TorchEmbedding
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_key)
)
elif content_key == 'document':
fields[field] = cls._get_field_type(field).from_protobuf(
value.document
) # we get to the parent class
elif content_key == 'document_array':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.document_array
) # we get to the parent class
elif content_key is None:
fields[field] = None
elif content_type is None:
if content_key == 'text':
fields[field] = value.text
elif content_key == 'blob':
fields[field] = value.blob
else:
raise ValueError(
f'type {content_type}, with key {content_key} is not supported for'
f' deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(document=self.to_protobuf())
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyEmbedding,
AnyUrl,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
# this if/else statement needs to be refactored; it is too long
# the check should be delegated to the type level
content_type_dict = dict(
ndarray=NdArray,
embedding=AnyEmbedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
mesh_url=Mesh3DUrl,
point_cloud_url=PointCloud3DUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_field_type(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(nested=self.to_protobuf())
|
_base_ = 'faster-rcnn_r50_fpn_crop640-50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(
type='FPG',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
stack_times=9,
paths=['bu'] * 9,
same_down_trans=None,
same_up_trans=dict(
type='conv',
kernel_size=3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_lateral_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_down_trans=dict(
type='interpolation_conv',
mode='nearest',
kernel_size=3,
norm_cfg=norm_cfg,
order=('act', 'conv', 'norm'),
inplace=False),
across_up_trans=None,
across_skip_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
output_trans=dict(
type='last_conv',
kernel_size=3,
order=('act', 'conv', 'norm'),
inplace=False),
norm_cfg=norm_cfg,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
|
_base_ = 'faster_rcnn_r50_fpn_crop640_50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(
type='FPG',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
stack_times=9,
paths=['bu'] * 9,
same_down_trans=None,
same_up_trans=dict(
type='conv',
kernel_size=3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_lateral_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_down_trans=dict(
type='interpolation_conv',
mode='nearest',
kernel_size=3,
norm_cfg=norm_cfg,
order=('act', 'conv', 'norm'),
inplace=False),
across_up_trans=None,
across_skip_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
output_trans=dict(
type='last_conv',
kernel_size=3,
order=('act', 'conv', 'norm'),
inplace=False),
norm_cfg=norm_cfg,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
|
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
_underscored_packages = {pkg.replace("-", "_") for pkg in packages}
for pkg in packages:
try:
required = metadata.requires(pkg)
except metadata.PackageNotFoundError:
continue
if not required:
continue
for req in required:
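# keep only the distribution name, dropping version specifiers and markers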
cleaned_req = req.split(" ")[0]
if cleaned_req.replace("-", "_") not in _underscored_packages:
sub_deps.add(cleaned_req)
return sorted(sub_deps, key=lambda x: x.lower())
def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
"""Print information about the environment for debugging purposes.
Args:
additional_pkgs: Additional packages to include in the output.
"""
import pkgutil
import platform
import sys
from importlib import metadata, util
# Packages that do not start with "langchain" prefix.
other_langchain_packages = [
"langserve",
"langsmith",
]
langchain_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain")
]
langgraph_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph")
]
all_packages = sorted(
set(
langchain_pkgs
+ langgraph_pkgs
+ other_langchain_packages
+ list(additional_pkgs)
)
)
# Always surface these packages to the top
order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"]
for pkg in reversed(order_by):
if pkg in all_packages:
all_packages.remove(pkg)
all_packages = [pkg] + list(all_packages)
system_info = {
"OS": platform.system(),
"OS Version": platform.version(),
"Python Version": sys.version,
}
print() # noqa: T201
print("System Information") # noqa: T201
print("------------------") # noqa: T201
print("> OS: ", system_info["OS"]) # noqa: T201
print("> OS Version: ", system_info["OS Version"]) # noqa: T201
print("> Python Version: ", system_info["Python Version"]) # noqa: T201
# Print out only langchain packages
print() # noqa: T201
print("Package Information") # noqa: T201
print("-------------------") # noqa: T201
not_installed = []
for pkg in all_packages:
try:
found_package = util.find_spec(pkg)
except Exception:
found_package = None
if found_package is None:
not_installed.append(pkg)
continue
# Package version
try:
package_version = metadata.version(pkg)
except Exception:
package_version = None
# Print package with version
if package_version is not None:
print(f"> {pkg}: {package_version}") # noqa: T201
else:
print(f"> {pkg}: Installed. No version info available.") # noqa: T201
if not_installed:
print() # noqa: T201
print("Optional packages not installed") # noqa: T201
print("-------------------------------") # noqa: T201
for pkg in not_installed:
print(f"> {pkg}") # noqa: T201
sub_dependencies = _get_sub_deps(all_packages)
if sub_dependencies:
print() # noqa: T201
print("Other Dependencies") # noqa: T201
print("------------------") # noqa: T201
for dep in sub_dependencies:
try:
dep_version = metadata.version(dep)
print(f"> {dep}: {dep_version}") # noqa: T201
except Exception:
print(f"> {dep}: Installed. No version info available.") # noqa: T201
if __name__ == "__main__":
print_sys_info()
|
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
_underscored_packages = {pkg.replace("-", "_") for pkg in packages}
for pkg in packages:
try:
required = metadata.requires(pkg)
except metadata.PackageNotFoundError:
continue
if not required:
continue
for req in required:
try:
cleaned_req = req.split(" ")[0]
except Exception: # In case parsing of requirement spec fails
continue
if cleaned_req.replace("-", "_") not in _underscored_packages:
sub_deps.add(cleaned_req)
return sorted(sub_deps, key=lambda x: x.lower())
def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
"""Print information about the environment for debugging purposes.
Args:
additional_pkgs: Additional packages to include in the output.
"""
import pkgutil
import platform
import sys
from importlib import metadata, util
# Packages that do not start with "langchain" prefix.
other_langchain_packages = [
"langserve",
"langsmith",
]
langchain_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain")
]
langgraph_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph")
]
all_packages = sorted(
set(
langchain_pkgs
+ langgraph_pkgs
+ other_langchain_packages
+ list(additional_pkgs)
)
)
# Always surface these packages to the top
order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"]
for pkg in reversed(order_by):
if pkg in all_packages:
all_packages.remove(pkg)
all_packages = [pkg] + list(all_packages)
system_info = {
"OS": platform.system(),
"OS Version": platform.version(),
"Python Version": sys.version,
}
print() # noqa: T201
print("System Information") # noqa: T201
print("------------------") # noqa: T201
print("> OS: ", system_info["OS"]) # noqa: T201
print("> OS Version: ", system_info["OS Version"]) # noqa: T201
print("> Python Version: ", system_info["Python Version"]) # noqa: T201
# Print out only langchain packages
print() # noqa: T201
print("Package Information") # noqa: T201
print("-------------------") # noqa: T201
not_installed = []
for pkg in all_packages:
try:
found_package = util.find_spec(pkg)
except Exception:
found_package = None
if found_package is None:
not_installed.append(pkg)
continue
# Package version
try:
package_version = metadata.version(pkg)
except Exception:
package_version = None
# Print package with version
if package_version is not None:
print(f"> {pkg}: {package_version}") # noqa: T201
else:
print(f"> {pkg}: Installed. No version info available.") # noqa: T201
if not_installed:
print() # noqa: T201
print("Optional packages not installed") # noqa: T201
print("-------------------------------") # noqa: T201
for pkg in not_installed:
print(f"> {pkg}") # noqa: T201
sub_dependencies = _get_sub_deps(all_packages)
if sub_dependencies:
print() # noqa: T201
print("Other Dependencies") # noqa: T201
print("------------------") # noqa: T201
for dep in sub_dependencies:
try:
dep_version = metadata.version(dep)
print(f"> {dep}: {dep_version}") # noqa: T201
except Exception:
print(f"> {dep}: Installed. No version info available.") # noqa: T201
if __name__ == "__main__":
print_sys_info()
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
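    # Pixel values pack an instance id and a class label into a single integer:
    # (i + 1) * INSTANCE_OFFSET + label, with the background pre-filled as class 2.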
sem_seg = np.zeros((h, w), dtype=np.int64) + 2
bboxes = _rand_bboxes(num_boxes, h, w).int()
labels = torch.randint(2, (num_boxes, ))
    for i in range(num_boxes):
        x, y, box_w, box_h = bboxes[i]
        sem_seg[y:y + box_h, x:x + box_w] = (i + 1) * INSTANCE_OFFSET + labels[i]
return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
det_data_sample = DetDataSample()
det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_pred=False)
# test out_file
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
pred_instances.scores = torch.rand((num_bboxes, ))
det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_gt=False, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# test gt_panoptic_seg and pred_panoptic_seg
det_local_visualizer.dataset_meta = dict(classes=('1', '2'))
gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=gt_sem_seg)
det_data_sample = DetDataSample()
det_data_sample.gt_panoptic_seg = panoptic_seg
pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=pred_sem_seg)
det_data_sample.pred_panoptic_seg = panoptic_seg
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
# class information must be provided
det_local_visualizer.dataset_meta = {}
with self.assertRaises(AssertionError):
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
import os
from unittest import TestCase
import cv2
import numpy as np
import torch
from mmengine.structures import InstanceData, PixelData
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.structures import DetDataSample
from mmdet.visualization import DetLocalVisualizer
def _rand_bboxes(num_boxes, h, w):
cx, cy, bw, bh = torch.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clamp(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clamp(0, h)
br_x = ((cx * w) + (w * bw / 2)).clamp(0, w)
br_y = ((cy * h) + (h * bh / 2)).clamp(0, h)
bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=0).T
return bboxes
def _create_panoptic_data(num_boxes, h, w):
sem_seg = np.zeros((h, w), dtype=np.int64) + 2
bboxes = _rand_bboxes(num_boxes, h, w).int()
labels = torch.randint(2, (num_boxes, ))
    for i in range(num_boxes):
        x, y, box_w, box_h = bboxes[i]
        sem_seg[y:y + box_h, x:x + box_w] = (i + 1) * INSTANCE_OFFSET + labels[i]
return sem_seg[None]
class TestDetLocalVisualizer(TestCase):
def test_add_datasample(self):
h = 12
w = 10
num_class = 3
num_bboxes = 5
out_file = 'out_file.jpg'
image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')
# test gt_instances
gt_instances = InstanceData()
gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
det_data_sample = DetDataSample()
det_data_sample.gt_instances = gt_instances
det_local_visualizer = DetLocalVisualizer()
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_pred=False)
# test out_file
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == (h, w, 3)
os.remove(out_file)
# test gt_instances and pred_instances
pred_instances = InstanceData()
pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)
pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))
pred_instances.scores = torch.rand((num_bboxes, ))
det_data_sample.pred_instances = pred_instances
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
det_local_visualizer.add_datasample(
'image', image, det_data_sample, draw_gt=False, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
det_local_visualizer.add_datasample(
'image',
image,
det_data_sample,
draw_pred=False,
out_file=out_file)
self._assert_image_and_shape(out_file, (h, w, 3))
# test gt_panoptic_seg and pred_panoptic_seg
det_local_visualizer.dataset_meta = dict(CLASSES=('1', '2'))
gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=gt_sem_seg)
det_data_sample = DetDataSample()
det_data_sample.gt_panoptic_seg = panoptic_seg
pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)
panoptic_seg = PixelData(sem_seg=pred_sem_seg)
det_data_sample.pred_panoptic_seg = panoptic_seg
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
self._assert_image_and_shape(out_file, (h, w * 2, 3))
# class information must be provided
det_local_visualizer.dataset_meta = {}
with self.assertRaises(AssertionError):
det_local_visualizer.add_datasample(
'image', image, det_data_sample, out_file=out_file)
def _assert_image_and_shape(self, out_file, out_shape):
assert os.path.exists(out_file)
drawn_img = cv2.imread(out_file)
assert drawn_img.shape == out_shape
os.remove(out_file)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.distribution.distribution_lib import DataParallel as DataParallel
from keras.src.distribution.distribution_lib import DeviceMesh as DeviceMesh
from keras.src.distribution.distribution_lib import LayoutMap as LayoutMap
from keras.src.distribution.distribution_lib import (
ModelParallel as ModelParallel,
)
from keras.src.distribution.distribution_lib import TensorLayout as TensorLayout
from keras.src.distribution.distribution_lib import (
distribute_tensor as distribute_tensor,
)
from keras.src.distribution.distribution_lib import distribution as distribution
from keras.src.distribution.distribution_lib import initialize as initialize
from keras.src.distribution.distribution_lib import list_devices as list_devices
from keras.src.distribution.distribution_lib import (
set_distribution as set_distribution,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.distribution.distribution_lib import DataParallel
from keras.src.distribution.distribution_lib import DeviceMesh
from keras.src.distribution.distribution_lib import LayoutMap
from keras.src.distribution.distribution_lib import ModelParallel
from keras.src.distribution.distribution_lib import TensorLayout
from keras.src.distribution.distribution_lib import distribute_tensor
from keras.src.distribution.distribution_lib import distribution
from keras.src.distribution.distribution_lib import initialize
from keras.src.distribution.distribution_lib import list_devices
from keras.src.distribution.distribution_lib import set_distribution
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class WeightedLayerPooling(Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
config_keys: list[str] = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super().__init__()
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # Start from the 4th layer's output
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
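# A minimal usage sketch (shapes and layer counts are illustrative assumptions):
# the module consumes "all_layer_embeddings", i.e. one tensor per hidden layer
# as produced by a transformer configured to output all hidden states.
if __name__ == "__main__":
    num_layers, batch, seq_len, dim = 12, 2, 8, 16
    pooling = WeightedLayerPooling(word_embedding_dimension=dim, num_hidden_layers=num_layers)
    features = {"all_layer_embeddings": [torch.rand(batch, seq_len, dim) for _ in range(num_layers + 1)]}
    out = pooling(features)
    assert out["token_embeddings"].shape == (batch, seq_len, dim)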
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super().__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # Start from the 4th layer's output
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
from typing import TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class ContentPropertyMixin:
"""Provide helper functions for :class:`Document` to allow universal content property access."""
@property
def content_hash(self) -> int:
"""Get the document hash according to its content.
:return: the unique hash code to represent this Document
"""
return hash(self)
def convert_content_to_datauri(self: 'T') -> 'T':
"""Convert :attr:`.content` in :attr:`.uri` inplace with best effort
:return: itself after processed
"""
if self.text:
self.convert_text_to_datauri()
elif self.blob:
self.convert_blob_to_datauri()
elif self.content_type:
raise NotImplementedError
return self
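# Usage sketch (assuming a docarray Document): Document(text='hello').convert_content_to_datauri()
# rewrites the document's .uri in place based on whichever content field is set.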
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from docarray.typing import T
class ContentPropertyMixin:
"""Provide helper functions for :class:`Document` to allow universal content property access."""
@property
def content_hash(self) -> int:
"""Get the document hash according to its content.
:return: the unique hash code to represent this Document
"""
return hash(self)
def convert_content_to_datauri(self: 'T') -> 'T':
"""Convert :attr:`.content` in :attr:`.uri` inplace with best effort
:return: itself after processed
"""
if self.text:
self.convert_text_to_datauri()
elif self.blob:
self.convert_blob_to_datauri()
elif self.content_type:
raise NotImplementedError
return self
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.1.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
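# Sketch of how the version guard behaves (illustrative; assumes mmengine's
# digit_version orders release candidates before final releases):
#   digit_version('2.0.0rc0') < digit_version('2.0.0') < digit_version('2.1.0')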
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.0.0'
mmengine_maximum_version = '0.2.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
from . import dataset, dist_utils, metrics
__all__ = ["dataset", "dist_utils", "metrics"]
|
from . import (
dataset,
dist_utils,
metrics,
)
__all__ = ["dataset", "dist_utils", "metrics"]
|
__version__ = '0.13.19'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
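# Usage sketch: opt in to rich tracebacks by exporting the variable before
# launching, e.g. `DA_RICH_HANDLER=1 python app.py`; only the key's presence
# is checked, so any value enables it.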
|
__version__ = '0.13.19'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet import MobileNet as MobileNet
from keras.src.applications.mobilenet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet import MobileNet
from keras.src.applications.mobilenet import decode_predictions
from keras.src.applications.mobilenet import preprocess_input
|
_base_ = '../ssd/ssd300_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
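# The optimizer wrapper clips gradients to a global L2 norm of 35 before each
# step, a common trick to stabilize SSD-style training.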
|
_base_ = '../ssd/ssd300_coco.py'
model = dict(
bbox_head=dict(type='PISASSDHead'),
train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
def test_set_point_cloud_url():
class MyDocument(BaseDocument):
point_cloud_url: PointCloud3DUrl
d = MyDocument(point_cloud_url="https://jina.ai/mesh.obj")
assert isinstance(d.point_cloud_url, PointCloud3DUrl)
assert d.point_cloud_url == "https://jina.ai/mesh.obj"
|
from docarray import Document
from docarray.typing import PointCloud3DUrl
def test_set_point_cloud_url():
class MyDocument(Document):
point_cloud_url: PointCloud3DUrl
d = MyDocument(point_cloud_url="https://jina.ai/mesh.obj")
assert isinstance(d.point_cloud_url, PointCloud3DUrl)
assert d.point_cloud_url == "https://jina.ai/mesh.obj"
|
"""Standard LangChain interface tests"""
from typing import Type
import pytest # type: ignore[import-not-found]
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_xai import ChatXAI
# Initialize the rate limiter in global scope, so it can be re-used
# across tests.
rate_limiter = InMemoryRateLimiter(
requests_per_second=0.5,
)
class TestXAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatXAI
@property
def chat_model_params(self) -> dict:
return {
"model": "grok-2",
"rate_limiter": rate_limiter,
}
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
|
"""Standard LangChain interface tests"""
from typing import Optional, Type
import pytest # type: ignore[import-not-found]
from langchain_core.language_models import BaseChatModel
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_xai import ChatXAI
# Initialize the rate limiter in global scope, so it can be re-used
# across tests.
rate_limiter = InMemoryRateLimiter(
requests_per_second=0.5,
)
class TestXAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatXAI
@property
def chat_model_params(self) -> dict:
return {
"model": "grok-2",
"rate_limiter": rate_limiter,
}
@property
def tool_choice_value(self) -> Optional[str]:
"""Value to use for tool choice when used in tests."""
return "tool_name"
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
|
"""Generate migrations for partner packages."""
import importlib
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langchain_cli.namespaces.migrate.generate.utils import (
COMMUNITY_PKG,
find_subclasses_in_module,
list_classes_by_package,
list_init_imports_by_package,
)
# PUBLIC API
def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
"""Generate migrations from community package to partner package.
    This code works by importing the partner package and scanning it for subclasses of the common LangChain base classes.
Args:
pkg_name (str): The name of the partner package.
Returns:
List of 2-tuples containing old and new import paths.
"""
package = importlib.import_module(pkg_name)
classes_ = find_subclasses_in_module(
package,
[
BaseLanguageModel,
Embeddings,
BaseRetriever,
VectorStore,
BaseDocumentTransformer,
BaseDocumentCompressor,
],
)
community_classes = list_classes_by_package(str(COMMUNITY_PKG))
imports_for_pkg = list_init_imports_by_package(str(COMMUNITY_PKG))
old_paths = community_classes + imports_for_pkg
return [
(f"{module}.{item}", f"{pkg_name}.{item}")
for module, item in old_paths
if item in classes_
]
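# A usage sketch (the package name and paths are hypothetical examples):
#   get_migrations_for_partner_package("langchain_openai")
# could yield pairs such as
#   ("langchain_community.chat_models.openai.ChatOpenAI", "langchain_openai.ChatOpenAI")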
|
"""Generate migrations for partner packages."""
import importlib
from langchain_core.documents import BaseDocumentCompressor, BaseDocumentTransformer
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langchain_cli.namespaces.migrate.generate.utils import (
COMMUNITY_PKG,
find_subclasses_in_module,
list_classes_by_package,
list_init_imports_by_package,
)
# PUBLIC API
def get_migrations_for_partner_package(pkg_name: str) -> list[tuple[str, str]]:
"""Generate migrations from community package to partner package.
    This code works by importing the partner package and scanning it for subclasses of the common LangChain base classes.
Args:
pkg_name (str): The name of the partner package.
Returns:
List of 2-tuples containing old and new import paths.
"""
package = importlib.import_module(pkg_name)
classes_ = find_subclasses_in_module(
package,
[
BaseLanguageModel,
Embeddings,
BaseRetriever,
VectorStore,
BaseDocumentTransformer,
BaseDocumentCompressor,
],
)
community_classes = list_classes_by_package(str(COMMUNITY_PKG))
imports_for_pkg = list_init_imports_by_package(str(COMMUNITY_PKG))
old_paths = community_classes + imports_for_pkg
migrations = [
(f"{module}.{item}", f"{pkg_name}.{item}")
for module, item in old_paths
if item in classes_
]
return migrations
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
assert isinstance(img.bytes_, ImageBytes)
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert isinstance(img.bytes_, ImageBytes)
assert len(img.bytes_) > 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
assert isinstance(img.bytes_, ImageBytes)
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert len(img.bytes_) > 0
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage all kinds of evaluators for computing metrics
EVALUATORS = Registry('evaluator')
# manage visualizer
VISUALIZERS = Registry('visualizer')
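# A minimal usage sketch (component names are illustrative): modules register
# themselves into a registry and are later built from config dicts.
#
#   @MODELS.register_module()
#   class MyDetector(...):
#       ...
#
#   model = MODELS.build(dict(type='MyDetector'))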
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage all kinds of evaluators for computing metrics
EVALUATORS = Registry('evaluator')
|
from typing import BinaryIO, Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: must comply with TorchScript syntax -- annotations required, no f-strings or globals
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
buffer_size: int = 4096,
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: must comply with TorchScript syntax -- annotations required, no f-strings or globals
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: BinaryIO,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: must comply with TorchScript syntax -- annotations required, no f-strings or globals
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
buffer_size: int = 4096,
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: must comply with TorchScript syntax -- annotations required, no f-strings or globals
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,
bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',
'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',
'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',
'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',
'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,
bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',
'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',
'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',
'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',
'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss'
]
|
import argparse
import os
from typing import List
from jina.parsers.helper import CastHostAction
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
from jina.parsers import helper
helper._SHOW_ALL_ARGS, old_val = True, helper._SHOW_ALL_ARGS
from jina import __version__
from jina.parsers import get_main_parser
all_d = {
'name': 'Jina',
'description': 'Build multimodal AI services via cloud native technologies',
'license': 'Apache 2.0',
'vendor': 'Jina AI Limited',
'source': 'https://github.com/jina-ai/jina/tree/'
+ os.environ.get('JINA_VCS_VERSION', 'master'),
'url': 'https://jina.ai',
'docs': 'https://docs.jina.ai',
'authors': 'dev-team@jina.ai',
'version': __version__,
'methods': [],
'revision': os.environ.get('JINA_VCS_VERSION'),
}
def get_p(p, parent_d):
parsers = p()._actions[-1].choices
if parsers:
for p_name in parsers.keys():
d = {'name': p_name, 'options': [], 'help': parsers[p_name].description}
for ddd in _export_parser_args(
lambda *x: p()._actions[-1].choices[p_name], type_as_str=True
):
d['options'].append(ddd)
if not d['options']:
d['methods'] = []
get_p(lambda *x: parsers[p_name], d)
parent_d['methods'].append(d)
get_p(get_main_parser, all_d)
if show_all_args:
helper._SHOW_ALL_ARGS = old_val
return all_d
def _export_parser_args(parser_fn, type_as_str: bool = False, **kwargs):
from argparse import _StoreAction, _StoreTrueAction
from jina.enums import BetterEnum
from jina.parsers.helper import _SHOW_ALL_ARGS, CastToIntAction, KVAppendAction
port_attr = ('help', 'choices', 'default', 'required', 'option_strings', 'dest')
parser = parser_fn(**kwargs)
parser2 = parser_fn(**kwargs)
random_dest = set()
for a, b in zip(parser._actions, parser2._actions):
if a.default != b.default:
random_dest.add(a.dest)
for a in parser._actions:
if isinstance(
a,
(
_StoreAction,
_StoreTrueAction,
KVAppendAction,
CastToIntAction,
CastHostAction,
),
):
if not _SHOW_ALL_ARGS and a.help == argparse.SUPPRESS:
continue
ddd = {p: getattr(a, p) for p in port_attr}
if isinstance(a, _StoreTrueAction):
ddd['type'] = bool
elif isinstance(a, KVAppendAction):
ddd['type'] = dict
elif isinstance(a, CastToIntAction):
ddd['type'] = int
elif isinstance(a, CastHostAction):
ddd['type'] = str
else:
ddd['type'] = a.type
if ddd['choices']:
ddd['choices'] = [
str(k) if isinstance(k, BetterEnum) else k for k in ddd['choices']
]
ddd['type'] = str
if isinstance(ddd['default'], BetterEnum):
ddd['default'] = str(ddd['default'])
ddd['type'] = str
if ddd['type'] == str and (a.nargs == '*' or a.nargs == '+'):
ddd['type'] = List[str]
else:
continue
if a.dest in random_dest:
ddd['default_random'] = True
from jina.helper import random_identity, random_port
if isinstance(a.default, str):
ddd['default_factory'] = random_identity.__name__
elif isinstance(a.default, int):
ddd['default_factory'] = random_port.__name__
else:
ddd['default_random'] = False
if type_as_str:
ddd['type'] = getattr(ddd['type'], '__name__', str(ddd['type']))
ddd['name'] = ddd.pop('dest')
yield ddd
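# Usage sketch: the exported dict mirrors the CLI tree, e.g.
#   spec = api_to_dict()
#   spec['name']                            # 'Jina'
#   [m['name'] for m in spec['methods']]    # subcommand names such as 'flow'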
|
import argparse
import os
from typing import List, Union
from jina.parsers.helper import CastHostAction
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
from jina.parsers import helper
helper._SHOW_ALL_ARGS, old_val = True, helper._SHOW_ALL_ARGS
from jina import __version__
from jina.parsers import get_main_parser
all_d = {
'name': 'Jina',
'description': 'Build multimodal AI services via cloud native technologies',
'license': 'Apache 2.0',
'vendor': 'Jina AI Limited',
'source': 'https://github.com/jina-ai/jina/tree/'
+ os.environ.get('JINA_VCS_VERSION', 'master'),
'url': 'https://jina.ai',
'docs': 'https://docs.jina.ai',
'authors': 'dev-team@jina.ai',
'version': __version__,
'methods': [],
'revision': os.environ.get('JINA_VCS_VERSION'),
}
def get_p(p, parent_d):
parsers = p()._actions[-1].choices
if parsers:
for p_name in parsers.keys():
d = {'name': p_name, 'options': [], 'help': parsers[p_name].description}
for ddd in _export_parser_args(
lambda *x: p()._actions[-1].choices[p_name], type_as_str=True
):
d['options'].append(ddd)
if not d['options']:
d['methods'] = []
get_p(lambda *x: parsers[p_name], d)
parent_d['methods'].append(d)
get_p(get_main_parser, all_d)
if show_all_args:
helper._SHOW_ALL_ARGS = old_val
return all_d
def _export_parser_args(parser_fn, type_as_str: bool = False, **kwargs):
from argparse import _StoreAction, _StoreTrueAction
from jina.enums import BetterEnum
from jina.parsers.helper import _SHOW_ALL_ARGS, CastToIntAction, KVAppendAction
port_attr = ('help', 'choices', 'default', 'required', 'option_strings', 'dest')
parser = parser_fn(**kwargs)
parser2 = parser_fn(**kwargs)
random_dest = set()
for a, b in zip(parser._actions, parser2._actions):
if a.default != b.default:
random_dest.add(a.dest)
for a in parser._actions:
if isinstance(
a,
(
_StoreAction,
_StoreTrueAction,
KVAppendAction,
CastToIntAction,
CastHostAction,
),
):
if not _SHOW_ALL_ARGS and a.help == argparse.SUPPRESS:
continue
ddd = {p: getattr(a, p) for p in port_attr}
if isinstance(a, _StoreTrueAction):
ddd['type'] = bool
elif isinstance(a, KVAppendAction):
ddd['type'] = dict
elif isinstance(a, CastToIntAction):
ddd['type'] = int
elif isinstance(a, CastHostAction):
ddd['type'] = str
else:
ddd['type'] = a.type
if ddd['choices']:
ddd['choices'] = [
str(k) if isinstance(k, BetterEnum) else k for k in ddd['choices']
]
ddd['type'] = str
if isinstance(ddd['default'], BetterEnum):
ddd['default'] = str(ddd['default'])
ddd['type'] = str
if ddd['type'] == str and (a.nargs == '*' or a.nargs == '+'):
ddd['type'] = List[str]
else:
continue
if a.dest in random_dest:
ddd['default_random'] = True
from jina.helper import random_identity, random_port
if isinstance(a.default, str):
ddd['default_factory'] = random_identity.__name__
elif isinstance(a.default, int):
ddd['default_factory'] = random_port.__name__
else:
ddd['default_random'] = False
if type_as_str:
ddd['type'] = getattr(ddd['type'], '__name__', str(ddd['type']))
ddd['name'] = ddd.pop('dest')
yield ddd
|
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
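# A usage sketch (root path and values are illustrative): iterate one
# split/fold of the 10-fold DTD protocol.
#   dataset = DTD("~/datasets/dtd", split="val", fold=2)
#   sample = next(iter(dataset))
#   sample["label"], sample["joint_categories"], sample["path"]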
|
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
|
class MissingConfigError(Exception):
"""The attempted operation requires configuration which is not available"""
class NeedConfirmation(Exception):
"""The user must explicitly confirm that they want to proceed"""
class InsufficientBalanceError(ValueError):
user_id: str
message: str
balance: float
amount: float
def __init__(self, message: str, user_id: str, balance: float, amount: float):
super().__init__(message)
self.args = (message, user_id, balance, amount)
self.message = message
self.user_id = user_id
self.balance = balance
self.amount = amount
def __str__(self):
"""Used to display the error message in the frontend, because we str() the error when sending the execution update"""
return self.message
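# A short usage sketch for `InsufficientBalanceError`: the structured fields are kept
# on the instance (and in `self.args`, so the exception pickles cleanly), while
# `str(err)` yields only the human-readable message sent in execution updates.
# All values below are made up.
try:
    raise InsufficientBalanceError(
        message="Balance too low: need 5.0, have 2.5",
        user_id="user-123",
        balance=2.5,
        amount=5.0,
    )
except InsufficientBalanceError as err:
    assert str(err) == "Balance too low: need 5.0, have 2.5"
    assert err.amount > err.balance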
|
class MissingConfigError(Exception):
"""The attempted operation requires configuration which is not available"""
class NeedConfirmation(Exception):
"""The user must explicitly confirm that they want to proceed"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_video_metric import BaseVideoMetric
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .coco_video_metric import CocoVideoMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .mot_challenge_metric import MOTChallengeMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults', 'BaseVideoMetric',
'MOTChallengeMetric', 'CocoVideoMetric'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults'
]
|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPLATE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply("y1", 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPLATE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPLATE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(y1, 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPLATE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv2 = nn.Conv2d(
in_channels=2,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv3 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
return x1, x2
model = Model()
x = torch.rand(1, 1, 3, 3)
dummy_runner = Mock()
dummy_runner.optimizer.zero_grad = Mock(return_value=None)
dummy_runner.optimizer.step = Mock(return_value=None)
dummy_runner.model = model
dummy_runner.outputs = dict()
dummy_runner.outputs['num_samples'] = 0
        class DummyLogger:
def __init__(self):
self.msg = ''
def log(self, msg=None, **kwargs):
self.msg += msg
dummy_runner.logger = DummyLogger()
optimizer_hook = OptimizerHook(
dict(max_norm=2), detect_anomalous_params=True)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
optimizer_hook.after_train_iter(dummy_runner, 0)
# assert the parameters of conv2 and conv3 are not in the
# computational graph which is with x1.sum() as root.
assert 'conv2.weight' in dummy_runner.logger.msg
assert 'conv2.bias' in dummy_runner.logger.msg
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_called()
optimizer_hook.detect_anomalous_parameters.assert_called()
dummy_runner.outputs['loss'] = model(x)[1].sum()
dummy_runner.logger.msg = ''
optimizer_hook.after_train_iter(dummy_runner, 0)
# assert the parameters of conv3 are not in the computational graph
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv2.weight' not in dummy_runner.logger.msg
assert 'conv2.bias' not in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
# grad_clip is None and detect_anomalous_parameters is False
optimizer_hook = OptimizerHook(detect_anomalous_params=False)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.after_train_iter(dummy_runner, 0)
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_not_called()
optimizer_hook.detect_anomalous_parameters.assert_not_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv2 = nn.Conv2d(
in_channels=2,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv3 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
return x1, x2
model = Model()
x = torch.rand(1, 1, 3, 3)
dummy_runner = Mock()
dummy_runner.optimizer.zero_grad = Mock(return_value=None)
dummy_runner.optimizer.step = Mock(return_value=None)
dummy_runner.model = model
dummy_runner.outputs = dict()
dummy_runner.outputs['num_samples'] = 0
        class DummyLogger:
def __init__(self):
self.msg = ''
def log(self, msg=None, **kwargs):
self.msg += msg
dummy_runner.logger = DummyLogger()
optimizer_hook = OptimizerHook(
dict(max_norm=2), detect_anomalous_params=True)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
optimizer_hook.after_train_iter(dummy_runner)
# assert the parameters of conv2 and conv3 are not in the
# computational graph which is with x1.sum() as root.
assert 'conv2.weight' in dummy_runner.logger.msg
assert 'conv2.bias' in dummy_runner.logger.msg
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_called()
optimizer_hook.detect_anomalous_parameters.assert_called()
dummy_runner.outputs['loss'] = model(x)[1].sum()
dummy_runner.logger.msg = ''
optimizer_hook.after_train_iter(dummy_runner)
# assert the parameters of conv3 are not in the computational graph
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv2.weight' not in dummy_runner.logger.msg
assert 'conv2.bias' not in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
# grad_clip is None and detect_anomalous_parameters is False
optimizer_hook = OptimizerHook(detect_anomalous_params=False)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.after_train_iter(dummy_runner)
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_not_called()
optimizer_hook.detect_anomalous_parameters.assert_not_called()
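# A hedged sketch of the traversal behind `detect_anomalous_params` exercised
# above: walk the autograd graph from the loss and collect every parameter that
# contributes to it; anything else is "anomalous". Simplified from the hook's logic.
import torch
from torch import nn
def _params_in_graph(loss: torch.Tensor) -> set:
    seen, found, stack = set(), set(), [loss.grad_fn]
    while stack:
        fn = stack.pop()
        if fn is None or fn in seen:
            continue
        seen.add(fn)
        if hasattr(fn, 'variable'):  # AccumulateGrad nodes hold leaf tensors
            found.add(fn.variable)
        stack.extend(next_fn for next_fn, _ in getattr(fn, 'next_functions', ()))
    return found
_net = nn.Linear(2, 1)
_loss = _net(torch.ones(1, 2)).sum()
assert set(_net.parameters()) <= _params_in_graph(_loss)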
|
"""You Retriever."""
import logging
import os
import warnings
from typing import Any, Dict, List, Literal, Optional
import requests
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
logger = logging.getLogger(__name__)
class YouRetriever(BaseRetriever):
"""
Retriever for You.com's Search and News API.
[API reference](https://documentation.you.com/api-reference/)
Args:
api_key: you.com API key, if `YDC_API_KEY` is not set in the environment
endpoint: you.com endpoints
num_web_results: The max number of web results to return, must be under 20
safesearch: Safesearch settings, one of "off", "moderate", "strict", defaults to moderate
country: Country code, ex: 'US' for United States, see API reference for more info
search_lang: (News API) Language codes, ex: 'en' for English, see API reference for more info
ui_lang: (News API) User interface language for the response, ex: 'en' for English, see API reference for more info
spellcheck: (News API) Whether to spell check query or not, defaults to True
"""
def __init__(
self,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
endpoint: Literal["search", "news"] = "search",
num_web_results: Optional[int] = None,
safesearch: Optional[Literal["off", "moderate", "strict"]] = None,
country: Optional[str] = None,
search_lang: Optional[str] = None,
ui_lang: Optional[str] = None,
spellcheck: Optional[bool] = None,
) -> None:
"""Init params."""
# Should deprecate `YOU_API_KEY` in favour of `YDC_API_KEY` for standardization purposes
self._api_key = api_key or os.getenv("YOU_API_KEY") or os.environ["YDC_API_KEY"]
super().__init__(callback_manager)
if endpoint not in ("search", "news"):
raise ValueError('`endpoint` must be either "search" or "news"')
# Raise warning if News API-specific fields are set but endpoint is not "news"
if endpoint != "news":
news_api_fields = (search_lang, ui_lang, spellcheck)
for field in news_api_fields:
if field:
warnings.warn(
(
f"News API-specific field '{field}' is set but `{endpoint=}`. "
"This will have no effect."
),
UserWarning,
)
self.endpoint = endpoint
self.num_web_results = num_web_results
self.safesearch = safesearch
self.country = country
self.search_lang = search_lang
self.ui_lang = ui_lang
self.spellcheck = spellcheck
def _generate_params(self, query: str) -> Dict[str, Any]:
params = {"safesearch": self.safesearch, "country": self.country}
if self.endpoint == "search":
params.update(
query=query,
num_web_results=self.num_web_results,
)
elif self.endpoint == "news":
params.update(
q=query,
count=self.num_web_results,
search_lang=self.search_lang,
ui_lang=self.ui_lang,
spellcheck=self.spellcheck,
)
# Remove `None` values
return {k: v for k, v in params.items() if v is not None}
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
headers = {"X-API-Key": self._api_key}
params = self._generate_params(query_bundle.query_str)
response = requests.get(
f"https://api.ydc-index.io/{self.endpoint}",
params=params,
headers=headers,
)
response.raise_for_status()
results = response.json()
nodes: List[TextNode] = []
if self.endpoint == "search":
for hit in results["hits"]:
nodes.append(
TextNode(
text="\n".join(hit["snippets"]),
)
)
else: # news endpoint
for article in results["news"]["results"]:
node = TextNode(
text=article["description"],
extra_info={"url": article["url"], "age": article["age"]},
)
nodes.append(node)
return [NodeWithScore(node=node, score=1.0) for node in nodes]
|
"""You Retriever."""
import logging
import os
import warnings
from typing import Any, Dict, List, Literal, Optional
import requests
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
logger = logging.getLogger(__name__)
class YouRetriever(BaseRetriever):
"""
Retriever for You.com's Search and News API.
[API reference](https://documentation.you.com/api-reference/)
Args:
api_key: you.com API key, if `YDC_API_KEY` is not set in the environment
endpoint: you.com endpoints
num_web_results: The max number of web results to return, must be under 20
safesearch: Safesearch settings, one of "off", "moderate", "strict", defaults to moderate
country: Country code, ex: 'US' for United States, see API reference for more info
search_lang: (News API) Language codes, ex: 'en' for English, see API reference for more info
ui_lang: (News API) User interface language for the response, ex: 'en' for English, see API reference for more info
spellcheck: (News API) Whether to spell check query or not, defaults to True
"""
def __init__(
self,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
endpoint: Literal["search", "news"] = "search",
num_web_results: Optional[int] = None,
safesearch: Optional[Literal["off", "moderate", "strict"]] = None,
country: Optional[str] = None,
search_lang: Optional[str] = None,
ui_lang: Optional[str] = None,
spellcheck: Optional[bool] = None,
) -> None:
"""Init params."""
# Should deprecate `YOU_API_KEY` in favour of `YDC_API_KEY` for standardization purposes
self._api_key = api_key or os.getenv("YOU_API_KEY") or os.environ["YDC_API_KEY"]
super().__init__(callback_manager)
if endpoint not in ("search", "news"):
raise ValueError('`endpoint` must be either "search" or "news"')
# Raise warning if News API-specific fields are set but endpoint is not "news"
if endpoint != "news":
news_api_fields = (search_lang, ui_lang, spellcheck)
for field in news_api_fields:
if field:
warnings.warn(
(
f"News API-specific field '{field}' is set but `{endpoint=}`. "
"This will have no effect."
),
UserWarning,
)
self.endpoint = endpoint
self.num_web_results = num_web_results
self.safesearch = safesearch
self.country = country
self.search_lang = search_lang
self.ui_lang = ui_lang
self.spellcheck = spellcheck
def _generate_params(self, query: str) -> Dict[str, Any]:
params = {"safesearch": self.safesearch, "country": self.country}
if self.endpoint == "search":
params.update(
query=query,
num_web_results=self.num_web_results,
)
elif self.endpoint == "news":
params.update(
q=query,
count=self.num_web_results,
search_lang=self.search_lang,
ui_lang=self.ui_lang,
spellcheck=self.spellcheck,
)
# Remove `None` values
return {k: v for k, v in params.items() if v is not None}
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
headers = {"X-API-Key": self._api_key}
params = self._generate_params(query_bundle.query_str)
response = requests.get(
f"https://api.ydc-index.io/{self.endpoint}",
params=params,
headers=headers,
)
response.raise_for_status()
results = response.json()
nodes: List[TextNode] = []
if self.endpoint == "search":
for hit in results["hits"]:
nodes.append(
TextNode(
text="\n".join(hit["snippets"]),
)
)
else: # news endpoint
for article in results["news"]["results"]:
node = TextNode(
text=article["description"],
extra_info={"url": article["url"], "age": article["age"]},
)
nodes.append(node)
return [NodeWithScore(node=node, score=1.0) for node in nodes]
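# A minimal usage sketch, assuming `YDC_API_KEY` is exported in the environment.
# The query below is made up; `retrieve` is inherited from `BaseRetriever` and
# accepts a plain string.
if __name__ == "__main__":
    retriever = YouRetriever(endpoint="news", num_web_results=5)
    for node_with_score in retriever.retrieve("latest developments in RAG"):
        print(node_with_score.node.text[:80])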
|
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fill examples with bitext up to max_tokens without breaking up examples.
[['I went', 'yo fui'],
['to the store', 'a la tienda']
]
=> ['I went to the store', 'yo fui a la tienda']
"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
finished_src, finished_tgt = [], []
sorted_examples = list(zip(src_examples, tgt_examples))
new_src, new_tgt = sorted_examples[0]
def is_too_big(strang):
return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:]):
cand_src = new_src + " " + src
cand_tgt = new_tgt + " " + tgt
if is_too_big(cand_src) or is_too_big(cand_tgt): # can't fit, finalize example
finished_src.append(new_src)
finished_tgt.append(new_tgt)
new_src, new_tgt = src, tgt
else: # can fit, keep adding
new_src, new_tgt = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(new_src)
finished_tgt.append(new_tgt)
return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
save_path = Path(save_path)
save_path.mkdir(exist_ok=True)
for split in ["train"]:
src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
for split in ["val", "test"]:
src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
shutil.copyfile(src_path, save_path / f"{split}.source")
shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
parser = argparse.ArgumentParser()
parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,google-t5/t5-base, etc.")
parser.add_argument("--max_seq_len", type=int, default=128)
parser.add_argument("--data_dir", type=str)
parser.add_argument("--save_path", type=str)
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
|
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fill examples with bitext up to max_tokens without breaking up examples.
[['I went', 'yo fui'],
['to the store', 'a la tienda']
]
=> ['I went to the store', 'yo fui a la tienda']
"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
finished_src, finished_tgt = [], []
sorted_examples = list(zip(src_examples, tgt_examples))
new_src, new_tgt = sorted_examples[0]
def is_too_big(strang):
return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:]):
cand_src = new_src + " " + src
cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
finished_src.append(new_src)
finished_tgt.append(new_tgt)
new_src, new_tgt = src, tgt
else: # can fit, keep adding
new_src, new_tgt = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(new_src)
finished_tgt.append(new_tgt)
return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
save_path = Path(save_path)
save_path.mkdir(exist_ok=True)
for split in ["train"]:
src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
for split in ["val", "test"]:
src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
shutil.copyfile(src_path, save_path / f"{split}.source")
shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
parser = argparse.ArgumentParser()
parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,google-t5/t5-base, etc.")
parser.add_argument("--max_seq_len", type=int, default=128)
parser.add_argument("--data_dir", type=str)
parser.add_argument("--save_path", type=str)
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
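# A hedged smoke test for `pack_examples`, substituting a hypothetical whitespace
# "tokenizer" stub for a real `AutoTokenizer` so it runs offline. Only the
# `tok(text, return_tensors="pt").input_ids.shape[1]` access used by `is_too_big`
# is emulated.
import torch
class _WhitespaceTok:
    def __call__(self, text, return_tensors=None):
        class _Out:
            pass
        out = _Out()
        out.input_ids = torch.zeros(1, len(text.split()), dtype=torch.long)
        return out
_src = ["I went", "to the store", "and bought milk"]
_tgt = ["yo fui", "a la tienda", "y compre leche"]
_packed_src, _packed_tgt = pack_examples(_WhitespaceTok(), _src, _tgt, max_tokens=5)
assert len(_packed_src) == len(_packed_tgt) == 2  # first two examples merged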
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import Cutmix, Mixup, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import Cutmix, Mixup, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBox,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
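# A brief sketch composing the v2 transforms re-exported above on a synthetic
# uint8 image tensor; the sizes are arbitrary examples.
import torch
_pipeline = Compose([
    Resize(256, antialias=True),
    CenterCrop(224),
    RandomHorizontalFlip(p=0.5),
])
_img = torch.randint(0, 256, (3, 300, 400), dtype=torch.uint8)
assert _pipeline(_img).shape[-2:] == (224, 224)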
|
import tempfile
import os
import time
from typing import Dict
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'docker-compose.yml')
)
milvus_compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'milvus-docker-compose.yml')
)
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'docarray_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='module')
def start_storage():
os.system(
f"docker-compose -f {compose_yml} --project-directory . up --build -d "
f"--remove-orphans"
)
os.system(
f"docker-compose -f {milvus_compose_yml} --project-directory . up --build -d"
)
_wait_for_es()
_wait_for_milvus()
yield
os.system(
f"docker-compose -f {compose_yml} --project-directory . down "
f"--remove-orphans"
)
os.system(
f"docker-compose -f {milvus_compose_yml} --project-directory . down "
f"--remove-orphans"
)
def restart_milvus():
os.system(f"docker-compose -f {milvus_compose_yml} --project-directory . down")
os.system(
f"docker-compose -f {milvus_compose_yml} --project-directory . up --build -d"
)
_wait_for_milvus(restart_on_failure=False)
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
def _wait_for_milvus(restart_on_failure=True):
from pymilvus import connections, has_collection
from pymilvus.exceptions import MilvusUnavailableException, MilvusException
    milvus_conn_alias = 'pytest_localhost_19530'
try:
connections.connect(alias=milvus_conn_alias, host='localhost', port=19530)
milvus_ready = False
while not milvus_ready:
try:
has_collection('ping', using=milvus_conn_alias)
milvus_ready = True
except MilvusUnavailableException:
# Milvus is not ready yet, just wait
time.sleep(0.5)
except MilvusException as e:
if e.code == 1 and restart_on_failure:
# something went wrong with the docker container, restart and retry once
restart_milvus()
else:
raise e
@pytest.fixture(scope='session')
def set_env_vars(request):
_old_environ = dict(os.environ)
os.environ.update(request.param)
yield
os.environ.clear()
os.environ.update(_old_environ)
|
import tempfile
import os
import time
from typing import Dict
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'docker-compose.yml')
)
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'docarray_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='module')
def start_storage():
os.system(
f"docker-compose -f {compose_yml} --project-directory . up --build -d "
f"--remove-orphans"
)
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
yield
os.system(
f"docker-compose -f {compose_yml} --project-directory . down "
f"--remove-orphans"
)
@pytest.fixture(scope='session')
def set_env_vars(request):
_old_environ = dict(os.environ)
os.environ.update(request.param)
yield
os.environ.clear()
os.environ.update(_old_environ)
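# A hedged example of consuming the `set_env_vars` fixture via pytest's indirect
# parametrization; the variable name below is hypothetical.
@pytest.mark.parametrize('set_env_vars', [{'MY_TEST_VAR': '1'}], indirect=True)
def test_env_var_is_set(set_env_vars):
    assert os.environ['MY_TEST_VAR'] == '1'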
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..integrations import replace_with_spqr_linear
from ..utils import is_accelerate_available, is_spqr_available, is_torch_available, logging
from ..utils.quantization_config import QuantizationConfigMixin
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class SpQRHfQuantizer(HfQuantizer):
"""
Quantizer of the SpQR method. Enables the loading of prequantized models.
"""
requires_calibration = True
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not torch.cuda.is_available():
raise RuntimeError("GPU is required to run SpQR quantized model.")
if not is_accelerate_available():
raise ImportError("Using `spqr` quantization requires Accelerate: `pip install accelerate`")
if not is_spqr_available():
raise ImportError("Using `spqr` quantization requires SpQR: `pip install spqr_quant[gpu]`")
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
torch_dtype = torch.float16
logger.info("Assuming SpQR inference on GPU and loading the model in `torch.float16`.")
elif torch_dtype != torch.float16:
raise ValueError(
"You cannot use any type other than torch.float16 for SpQR. Please either leave it None or set it to"
"torch.float16 explicitly."
)
return torch_dtype
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[list[str]] = None,
**kwargs,
):
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
replace_with_spqr_linear(
model,
quantization_config=self.quantization_config,
modules_to_not_convert=self.modules_to_not_convert,
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
return False
def is_serializable(self, safe_serialization=None):
return True
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Optional
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..integrations import replace_with_spqr_linear
from ..utils import is_accelerate_available, is_spqr_available, is_torch_available, logging
from ..utils.quantization_config import QuantizationConfigMixin
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class SpQRHfQuantizer(HfQuantizer):
"""
Quantizer of the SpQR method. Enables the loading of prequantized models.
"""
requires_calibration = True
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not torch.cuda.is_available():
raise RuntimeError("GPU is required to run SpQR quantized model.")
if not is_accelerate_available():
raise ImportError("Using `spqr` quantization requires Accelerate: `pip install accelerate`")
if not is_spqr_available():
raise ImportError("Using `spqr` quantization requires SpQR: `pip install spqr_quant[gpu]`")
def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
if torch_dtype is None:
torch_dtype = torch.float16
logger.info("Assuming SpQR inference on GPU and loading the model in `torch.float16`.")
elif torch_dtype != torch.float16:
raise ValueError(
"You cannot use any type other than torch.float16 for SpQR. Please either leave it None or set it to"
"torch.float16 explicitly."
)
return torch_dtype
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
replace_with_spqr_linear(
model,
quantization_config=self.quantization_config,
modules_to_not_convert=self.modules_to_not_convert,
)
model.config.quantization_config = self.quantization_config
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
@property
def is_trainable(self, model: Optional["PreTrainedModel"] = None):
return False
def is_serializable(self, safe_serialization=None):
return True
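# A hedged loading sketch: SpQR sets `requires_calibration = True`, so quantization
# is driven entirely by a prequantized checkpoint's own config. The model id below
# is a placeholder, not a real repository.
import torch
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
    "org/some-spqr-quantized-model",  # hypothetical prequantized checkpoint
    torch_dtype=torch.float16,  # the only dtype this quantizer accepts (see above)
    device_map="auto",  # SpQR inference requires a GPU
)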
|
"""Quip reader."""
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from typing import Any, Dict, List, Optional
import requests # type: ignore
import time
from pydantic import Field
BASE_URL = "https://platform.quip.com"
class QuipReader(BasePydanticReader):
access_token: str = Field(description="Quip API access token")
request_timeout: Optional[float] = Field(
default=None, description="Request timeout in seconds"
)
headers: Dict[str, str] = Field(
default=None, description="Headers to be sent with the request"
)
def __init__(
self,
access_token: str,
request_timeout: Optional[float] = None,
headers: Optional[Dict[str, str]] = None,
) -> None:
headers = headers or {}
if "Authorization" not in headers:
headers["Authorization"] = "Bearer " + access_token
super().__init__(
access_token=access_token,
request_timeout=request_timeout,
headers=headers,
)
@classmethod
def class_name(cls) -> str:
return "QuipReader"
def load_data(self, thread_ids: List[str]) -> List[Document]:
"""Load data from Quip."""
documents = []
thread_contents = self._get_threads(thread_ids)
for i, content in enumerate(thread_contents):
doc = Document(
text=content, id_=thread_ids[i], extra_info={"thread_id": thread_ids[i]}
)
documents.append(doc)
return documents
def _get_threads(self, ids: List[str]) -> List[str]:
"""Returns a dictionary of threads for the given IDs."""
threads = []
for id in ids:
thread_content = self._get_thread(id)
threads.append(thread_content)
return threads
def _get_thread(self, id) -> str:
"""Returns the thread with the given ID."""
url = BASE_URL + "/2/" + "threads/" + id + "/html"
return self._request_with_retry("GET", url, headers=self.headers).get(
"html", ""
)
def _request_with_retry(
self,
method: str,
url: str,
headers: Dict[str, str],
json: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Make a request with retry and rate limit handling."""
max_retries = 5
backoff_factor = 1
for attempt in range(max_retries):
try:
response = requests.request(method, url, headers=headers, json=json)
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError:
if response.status_code == 429:
# Rate limit exceeded
retry_after = int(response.headers.get("Retry-After", 1))
time.sleep(backoff_factor * (2**attempt) + retry_after)
else:
raise requests.exceptions.HTTPError(
f"Request failed: {response.text}"
)
except requests.exceptions.RequestException as err:
raise requests.exceptions.RequestException(f"Request failed: {err}")
raise Exception("Maximum retries exceeded")
|
"""Quip reader."""
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from typing import Any, Dict, List, Optional
import requests # type: ignore
import time
from pydantic import Field
BASE_URL = "https://platform.quip.com"
class QuipReader(BasePydanticReader):
access_token: str = Field(description="Quip API access token")
request_timeout: Optional[float] = Field(
default=None, description="Request timeout in seconds"
)
headers: Dict[str, str] = Field(
default=None, description="Headers to be sent with the request"
)
def __init__(
self,
access_token: str,
request_timeout: Optional[float] = None,
headers: Optional[Dict[str, str]] = None,
) -> None:
headers = headers or {}
if "Authorization" not in headers:
headers["Authorization"] = "Bearer " + access_token
super().__init__(
access_token=access_token,
request_timeout=request_timeout,
headers=headers,
)
@classmethod
def class_name(cls) -> str:
return "QuipReader"
def load_data(self, thread_ids: List[str]) -> List[Document]:
"""Load data from Quip."""
documents = []
thread_contents = self._get_threads(thread_ids)
for i, content in enumerate(thread_contents):
doc = Document(
text=content, id_=thread_ids[i], extra_info={"thread_id": thread_ids[i]}
)
documents.append(doc)
return documents
def _get_threads(self, ids: List[str]) -> List[str]:
"""Returns a dictionary of threads for the given IDs."""
threads = []
for id in ids:
thread_content = self._get_thread(id)
threads.append(thread_content)
return threads
def _get_thread(self, id) -> str:
"""Returns the thread with the given ID."""
url = BASE_URL + "/2/" + "threads/" + id + "/html"
return self._request_with_retry("GET", url, headers=self.headers).get(
"html", ""
)
def _request_with_retry(
self,
method: str,
url: str,
headers: Dict[str, str],
json: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Make a request with retry and rate limit handling."""
max_retries = 5
backoff_factor = 1
for attempt in range(max_retries):
try:
response = requests.request(method, url, headers=headers, json=json)
response.raise_for_status()
return response.json()
except requests.exceptions.HTTPError:
if response.status_code == 429:
# Rate limit exceeded
retry_after = int(response.headers.get("Retry-After", 1))
time.sleep(backoff_factor * (2**attempt) + retry_after)
else:
raise requests.exceptions.HTTPError(
f"Request failed: {response.text}"
)
except requests.exceptions.RequestException as err:
raise requests.exceptions.RequestException(f"Request failed: {err}")
raise Exception("Maximum retries exceeded")
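# A brief usage sketch; the token variable and thread id are placeholders.
import os
reader = QuipReader(access_token=os.environ["QUIP_ACCESS_TOKEN"])
docs = reader.load_data(thread_ids=["AbCdEfGh1234"])  # hypothetical thread id
for doc in docs:
    print(doc.id_, len(doc.text))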
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args` (named `file_client_args` in versions before 3.0.0rc6)
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
# Align with Detectron2
backend = 'pillow'
train_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=backend_args,
imdecode_backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True,
backend=backend),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=backend_args,
imdecode_backend=backend),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
pin_memory=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
pin_memory=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 90k
max_iter = 90000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iter, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
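# A hedged illustration (not consumed by the runner) of the linear scaling rule
# behind `auto_scale_lr`: the lr scales with the ratio of actual to base batch
# size. The 4-GPU setup below is hypothetical.
_actual_batch_size = 4 * 2  # 4 GPUs x 2 samples per GPU (hypothetical)
_scaled_lr = optim_wrapper['optimizer']['lr'] * _actual_batch_size / auto_scale_lr['base_batch_size']  # -> 0.01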
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# Align with Detectron2
backend = 'pillow'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args=file_client_args,
imdecode_backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True,
backend=backend),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args=file_client_args,
imdecode_backend=backend),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
pin_memory=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
pin_memory=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# training schedule for 90k
max_iter = 90000
train_cfg = dict(
type='IterBasedTrainLoop', max_iters=max_iter, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage all kinds of evaluators for computing metrics
EVALUATORS = Registry('evaluator')
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
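# A hedged usage sketch of the registries declared above, using the standard
# MMEngine Registry API: modules register via a decorator and are then built from
# config dicts. `TinyHook` is illustrative, not a real MMEngine hook.
@HOOKS.register_module()
class TinyHook:
    def __init__(self, interval=1):
        self.interval = interval
hook = HOOKS.build(dict(type='TinyHook', interval=10))
assert hook.interval == 10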
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize as deserialize
from keras.src.activations import get as get
from keras.src.activations import serialize as serialize
from keras.src.activations.activations import celu as celu
from keras.src.activations.activations import elu as elu
from keras.src.activations.activations import exponential as exponential
from keras.src.activations.activations import gelu as gelu
from keras.src.activations.activations import glu as glu
from keras.src.activations.activations import hard_shrink as hard_shrink
from keras.src.activations.activations import hard_sigmoid as hard_sigmoid
from keras.src.activations.activations import hard_silu as hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh as hard_tanh
from keras.src.activations.activations import leaky_relu as leaky_relu
from keras.src.activations.activations import linear as linear
from keras.src.activations.activations import log_sigmoid as log_sigmoid
from keras.src.activations.activations import log_softmax as log_softmax
from keras.src.activations.activations import mish as mish
from keras.src.activations.activations import relu as relu
from keras.src.activations.activations import relu6 as relu6
from keras.src.activations.activations import selu as selu
from keras.src.activations.activations import sigmoid as sigmoid
from keras.src.activations.activations import silu as silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink as soft_shrink
from keras.src.activations.activations import softmax as softmax
from keras.src.activations.activations import softplus as softplus
from keras.src.activations.activations import softsign as softsign
from keras.src.activations.activations import sparse_plus as sparse_plus
from keras.src.activations.activations import sparse_sigmoid as sparse_sigmoid
from keras.src.activations.activations import sparsemax as sparsemax
from keras.src.activations.activations import squareplus as squareplus
from keras.src.activations.activations import tanh as tanh
from keras.src.activations.activations import tanh_shrink as tanh_shrink
from keras.src.activations.activations import threshold as threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparse_sigmoid
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
|
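# For orientation, a minimal sketch of how the re-exported activations above
# are typically called (assumes a standard `keras` install; the values shown
# in comments are illustrative only):
import numpy as np
import keras

x = np.array([-2.0, 0.0, 3.0])
print(keras.activations.relu(x))     # negative entries clipped to 0
print(keras.activations.sigmoid(x))  # elementwise logistic
# `swish` and `hard_swish` are aliases of silu / hard_silu, as the imports show.
print(keras.activations.swish(x))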
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDocument):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
'my/local/file.mp3',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='VideoUrl'):
parse_obj_as(VideoUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert len(video_bytes) > 0
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDocument):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
'my/local/file.mp3',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='VideoUrl'):
parse_obj_as(VideoUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
ReconstructionLoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling, TopKActivation
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"TopKActivation",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"ReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Add in models the possibility to have the MLM head (for SPLADE)
# TODO : Check every loss for compatibility with the SparseEncoder; the ones not using the utils similarity function should only need minor modifications
# TODO : Do the same compatibility check for the evaluators
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
ReconstructionLoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity, TopKActivation
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"TopKActivation",
# Losses
"CSRLoss",
"ReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Add in models the possibility to have the MLM head(for splade)
# TODO : Check for every loss if compatible with the SparseEncoder but should have some minor modifications for the ones not using the utils similarity function
# TODO : Same for the evaluator
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
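# A minimal sketch of wiring the exported components together, mirroring the
# SPLADE evaluation script that appears later in this collection (the checkpoint
# name is an assumption; `encode` presumes the usual SentenceTransformer-style API):
from sentence_transformers.sparse_encoder import MLMTransformer, SparseEncoder, SpladePooling

model = SparseEncoder(
    modules=[
        MLMTransformer("naver/splade-cocondenser-ensembledistil"),
        SpladePooling(pooling_strategy="max"),
    ]
)
embeddings = model.encode(["an example sentence for sparse encoding"])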
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
PALETTE = [(0, 255, 0)]
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|
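# A hypothetical mmdet config snippet consuming the dataset registered above
# ('WIDERFaceDataset' is the registry key; the paths are placeholders):
data = dict(
    train=dict(
        type='WIDERFaceDataset',
        ann_file='data/WIDERFace/train.txt',
        img_prefix='data/WIDERFace/WIDER_train/'))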
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param port: The port of the Gateway, which the client should connect to.
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
these proxy variables before starting. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.port = port
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Set up the WebSocket server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
|
import logging
import os
from typing import Optional
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
class WebSocketGateway(BaseGateway):
"""WebSocket Gateway implementation"""
def __init__(
self,
port: Optional[int] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: Optional[bool] = None,
**kwargs
):
"""Initialize the gateway
:param port: The port of the Gateway, which the client should connect to.
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
these proxy variables before starting. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.port = port
self.ssl_keyfile = ssl_keyfile
self.ssl_certfile = ssl_certfile
self.uvicorn_kwargs = uvicorn_kwargs
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
async def setup_server(self):
"""
Set up the WebSocket server
"""
from jina.helper import extend_rest_interface
self.app = extend_rest_interface(
get_fastapi_app(
streamer=self.streamer,
logger=self.logger,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
)
)
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
uvicorn_kwargs = self.uvicorn_kwargs or {}
if self.ssl_keyfile and 'ssl_keyfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_keyfile'] = self.ssl_keyfile
if self.ssl_certfile and 'ssl_certfile' not in uvicorn_kwargs.keys():
uvicorn_kwargs['ssl_certfile'] = self.ssl_certfile
self.server = UviServer(
config=Config(
app=self.app,
host=__default_host__,
port=self.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs,
)
)
await self.server.setup()
async def teardown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
"""
Stop WebSocket server
"""
self.server.should_exit = True
async def run_server(self):
"""Run WebSocket server forever"""
await self.server.serve()
@property
def should_exit(self) -> bool:
"""
Boolean flag that indicates whether the gateway server should exit or not
:return: boolean flag
"""
return self.server.should_exit
|
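# A hypothetical sketch of serving this gateway from a Flow; `config_gateway`
# is the assumed hook for plugging in custom gateway classes:
from jina import Flow

flow = Flow().config_gateway(uses=WebSocketGateway, port=12345, protocol='websocket')
with flow:
    flow.block()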
import threading
from typing import Optional
__all__ = ["LinearBlockSparsePattern"]
def _is_valid_linear_block_sparse_pattern(
row_block_size: int, col_block_size: int
) -> bool:
return (row_block_size == 1 and col_block_size == 4) or (
row_block_size == 8 and col_block_size == 1
)
# This is a stop-gap measure, as the current flow does not allow a
# module-specific block sparse pattern.
# In fact there is no way to convey sparse pattern via module config
# of quantization flow. Thus using the global context to convey
# sparsity pattern.
# Once the flow supports it, this should be removed.
class LinearBlockSparsePattern:
rlock = threading.RLock()
row_block_size: int = 1
col_block_size: int = 4
prev_row_block_size: int = 1
prev_col_block_size: int = 4
def __init__(self, row_block_size: int = 1, col_block_size: int = 4):
assert _is_valid_linear_block_sparse_pattern(row_block_size, col_block_size)
LinearBlockSparsePattern.rlock.acquire()
LinearBlockSparsePattern.prev_row_block_size = (
LinearBlockSparsePattern.row_block_size
)
LinearBlockSparsePattern.prev_col_block_size = (
LinearBlockSparsePattern.col_block_size
)
LinearBlockSparsePattern.row_block_size = row_block_size
LinearBlockSparsePattern.col_block_size = col_block_size
def __enter__(self) -> None:
pass
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
backtrace: Optional[object],
) -> None:
LinearBlockSparsePattern.row_block_size = (
LinearBlockSparsePattern.prev_row_block_size
)
LinearBlockSparsePattern.col_block_size = (
LinearBlockSparsePattern.prev_col_block_size
)
LinearBlockSparsePattern.rlock.release()
@staticmethod
def block_size() -> tuple[int, int]:
return (
LinearBlockSparsePattern.row_block_size,
LinearBlockSparsePattern.col_block_size,
)
|
import threading
from typing import Optional
__all__ = ["LinearBlockSparsePattern"]
def _is_valid_linear_block_sparse_pattern(
row_block_size: int, col_block_size: int
) -> bool:
return (row_block_size == 1 and col_block_size == 4) or (
row_block_size == 8 and col_block_size == 1
)
# This is a stop-gap measure, as the current flow does not allow a
# module-specific block sparse pattern.
# In fact there is no way to convey sparse pattern via module config
# of quantization flow. Thus using the global context to convey
# sparsity pattern.
# Once the flow supports it, this should be removed.
class LinearBlockSparsePattern:
rlock = threading.RLock()
row_block_size: int = 1
col_block_size: int = 4
prev_row_block_size: int = 1
prev_col_block_size: int = 4
def __init__(self, row_block_size: int = 1, col_block_size: int = 4):
assert _is_valid_linear_block_sparse_pattern(row_block_size, col_block_size)
LinearBlockSparsePattern.rlock.acquire()
LinearBlockSparsePattern.prev_row_block_size = (
LinearBlockSparsePattern.row_block_size
)
LinearBlockSparsePattern.prev_col_block_size = (
LinearBlockSparsePattern.col_block_size
)
LinearBlockSparsePattern.row_block_size = row_block_size
LinearBlockSparsePattern.col_block_size = col_block_size
def __enter__(self) -> None:
pass
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
backtrace: Optional[object],
) -> None:
LinearBlockSparsePattern.row_block_size = (
LinearBlockSparsePattern.prev_row_block_size
)
LinearBlockSparsePattern.col_block_size = (
LinearBlockSparsePattern.prev_col_block_size
)
LinearBlockSparsePattern.rlock.release()
@staticmethod
def block_size() -> tuple[int, int]:
return (
LinearBlockSparsePattern.row_block_size,
LinearBlockSparsePattern.col_block_size,
)
|
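# A minimal usage sketch of the context manager defined above: entering it
# swaps the process-global pattern, and exiting restores the previous one.
with LinearBlockSparsePattern(row_block_size=8, col_block_size=1):
    assert LinearBlockSparsePattern.block_size() == (8, 1)
assert LinearBlockSparsePattern.block_size() == (1, 4)  # default pattern restored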
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
Bytes that store an image and that can be loaded into an image tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> np.ndarray:
"""
Load the image from the bytes into a numpy.ndarray image tensor
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: np.ndarray representing the image as RGB values
"""
from PIL import Image as PILImage
raw_img = PILImage.open(BytesIO(self))
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
return self._move_channel_axis(tensor, axis_layout=axis_layout)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='ImageBytes')
@_register_proto(proto_type_name='image_bytes')
class ImageBytes(bytes, AbstractType):
"""
Bytes that store an image and that can be loaded into an image tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
) -> np.ndarray:
"""
Load the image from the bytes into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:return: np.ndarray representing the image as RGB values
"""
from PIL import Image as PILImage
raw_img = PILImage.open(BytesIO(self))
if width or height:
new_width = width or raw_img.width
new_height = height or raw_img.height
raw_img = raw_img.resize((new_width, new_height))
try:
tensor = np.array(raw_img.convert('RGB'))
except Exception:
tensor = np.array(raw_img)
return self._move_channel_axis(tensor, axis_layout=axis_layout)
@staticmethod
def _move_channel_axis(
tensor: np.ndarray, axis_layout: Tuple[str, str, str] = ('H', 'W', 'C')
) -> np.ndarray:
"""Moves channel axis around."""
channel_to_offset = {'H': 0, 'W': 1, 'C': 2}
permutation = tuple(channel_to_offset[axis] for axis in axis_layout)
return np.transpose(tensor, permutation)
|
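# A small sketch of using ImageBytes directly (assumes 'photo.jpg' exists
# locally; any decodable image works):
with open('photo.jpg', 'rb') as f:
    img_bytes = ImageBytes(f.read())
tensor = img_bytes.load(width=128, height=96, axis_layout=('C', 'H', 'W'))
assert tensor.shape == (3, 96, 128)  # channels first, per the requested layout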
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for some NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=["QuoraRetrieval", "MSMARCO"],
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for some NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=["QuoraRetrieval", "MSMARCO"],
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
print("Starting evaluation on all NanoBEIR datasets")
results = evaluator(model)
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# Print results for each dataset
for key, value in results.items():
if key.startswith("Nano"):
print(f"{key}: {value:.4f}")
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.6.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.1.5",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"optimum_quanto": "optimum_quanto>=0.2.6",
"gguf": "gguf>=0.10.0",
"torchao": "torchao>=0.7.0",
"bitsandbytes": "bitsandbytes>=0.43.3",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.6.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.1.5",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
}
|
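# A hypothetical helper mirroring how such autogenerated tables are typically
# consumed from setup.py (`deps_list` is illustrative, not part of this file):
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]

install_requires = deps_list("numpy", "Pillow", "requests")
# -> ['numpy', 'Pillow', 'requests']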
_base_ = './retinanet_r50_caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from docarray.computation.numpy_backend import NumpyCompBackend
def test_topk_numpy():
top_k = NumpyCompBackend.Retrieval.top_k
a = np.array([1, 4, 2, 7, 4, 9, 2])
vals, indices = top_k(a, 3)
assert vals.shape == (1, 3)
assert indices.shape == (1, 3)
assert (vals.squeeze() == np.array([1, 2, 2])).all()
assert (indices.squeeze() == np.array([0, 2, 6])).all() or (
indices.squeeze() == np.array([0, 6, 2])
).all()
a = np.array([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]])
vals, indices = top_k(a, 3)
assert vals.shape == (2, 3)
assert indices.shape == (2, 3)
assert (vals[0] == np.array([1, 2, 2])).all()
assert (indices[0] == np.array([0, 2, 6])).all() or (
indices[0] == np.array([0, 6, 2])
).all()
assert (vals[1] == np.array([2, 3, 4])).all()
assert (indices[1] == np.array([2, 4, 6])).all()
|
import numpy as np
from docarray.computation.numpy_backend import NumpyCompBackend
def test_topk_numpy():
top_k = NumpyCompBackend.Retrieval.top_k
a = np.array([1, 4, 2, 7, 4, 9, 2])
vals, indices = top_k(a, 3)
assert vals.shape == (1, 3)
assert indices.shape == (1, 3)
assert (vals.squeeze() == np.array([1, 2, 2])).all()
assert (indices.squeeze() == np.array([0, 2, 6])).all() or (
indices.squeeze() == np.array([0, 6, 2])
).all()
a = np.array([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]])
vals, indices = top_k(a, 3)
assert vals.shape == (2, 3)
assert indices.shape == (2, 3)
assert (vals[0] == np.array([1, 2, 2])).all()
assert (indices[0] == np.array([0, 2, 6])).all() or (
indices[0] == np.array([0, 6, 2])
).all()
assert (vals[1] == np.array([2, 3, 4])).all()
assert (indices[1] == np.array([2, 4, 6])).all()
|
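# An illustrative re-implementation consistent with the assertions above: the
# backend's top_k returns the k *smallest* values per row and always yields
# 2-D (n_rows, k) outputs, even for 1-D input.
import numpy as np

def top_k_sketch(a, k):
    a = a.reshape(1, -1) if a.ndim == 1 else a
    idx = np.argpartition(a, k - 1, axis=1)[:, :k]   # k smallest, unordered
    vals = np.take_along_axis(a, idx, axis=1)
    order = np.argsort(vals, axis=1)                 # sort the k values ascending
    return np.take_along_axis(vals, order, axis=1), np.take_along_axis(idx, order, axis=1)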
import abc
import io
import pathlib
import pickle
from collections.abc import Iterator
from typing import Any, BinaryIO, cast, Optional, Union
import numpy as np
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
class CifarFileReader(IterDataPipe[tuple[np.ndarray, int]]):
def __init__(self, datapipe: IterDataPipe[dict[str, Any]], *, labels_key: str) -> None:
self.datapipe = datapipe
self.labels_key = labels_key
def __iter__(self) -> Iterator[tuple[np.ndarray, int]]:
for mapping in self.datapipe:
image_arrays = mapping["data"].reshape((-1, 3, 32, 32))
category_idcs = mapping[self.labels_key]
yield from iter(zip(image_arrays, category_idcs))
class _CifarBase(Dataset):
_FILE_NAME: str
_SHA256: str
_LABELS_KEY: str
_META_FILE_NAME: str
_CATEGORIES_KEY: str
_categories: list[str]
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
@abc.abstractmethod
def _is_data_file(self, data: tuple[str, BinaryIO]) -> Optional[int]:
pass
def _resources(self) -> list[OnlineResource]:
return [
HttpResource(
f"https://www.cs.toronto.edu/~kriz/{self._FILE_NAME}",
sha256=self._SHA256,
)
]
def _unpickle(self, data: tuple[str, io.BytesIO]) -> dict[str, Any]:
_, file = data
content = cast(dict[str, Any], pickle.load(file, encoding="latin1"))
file.close()
return content
def _prepare_sample(self, data: tuple[np.ndarray, int]) -> dict[str, Any]:
image_array, category_idx = data
return dict(
image=Image(image_array),
label=Label(category_idx, categories=self._categories),
)
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, self._is_data_file)
dp = Mapper(dp, self._unpickle)
dp = CifarFileReader(dp, labels_key=self._LABELS_KEY)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 50_000 if self._split == "train" else 10_000
def _generate_categories(self) -> list[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", self._META_FILE_NAME))
dp = Mapper(dp, self._unpickle)
return cast(list[str], next(iter(dp))[self._CATEGORIES_KEY])
@register_info("cifar10")
def _cifar10_info() -> dict[str, Any]:
return dict(categories=read_categories_file("cifar10"))
@register_dataset("cifar10")
class Cifar10(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-10-python.tar.gz"
_SHA256 = "6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
_LABELS_KEY = "labels"
_META_FILE_NAME = "batches.meta"
_CATEGORIES_KEY = "label_names"
_categories = _cifar10_info()["categories"]
def _is_data_file(self, data: tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name.startswith("data" if self._split == "train" else "test")
@register_info("cifar100")
def _cifar100_info() -> dict[str, Any]:
return dict(categories=read_categories_file("cifar100"))
@register_dataset("cifar100")
class Cifar100(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-100-python.tar.gz"
_SHA256 = "85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
_LABELS_KEY = "fine_labels"
_META_FILE_NAME = "meta"
_CATEGORIES_KEY = "fine_label_names"
_categories = _cifar100_info()["categories"]
def _is_data_file(self, data: tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name == self._split
|
import abc
import io
import pathlib
import pickle
from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
class CifarFileReader(IterDataPipe[Tuple[np.ndarray, int]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]], *, labels_key: str) -> None:
self.datapipe = datapipe
self.labels_key = labels_key
def __iter__(self) -> Iterator[Tuple[np.ndarray, int]]:
for mapping in self.datapipe:
image_arrays = mapping["data"].reshape((-1, 3, 32, 32))
category_idcs = mapping[self.labels_key]
yield from iter(zip(image_arrays, category_idcs))
class _CifarBase(Dataset):
_FILE_NAME: str
_SHA256: str
_LABELS_KEY: str
_META_FILE_NAME: str
_CATEGORIES_KEY: str
_categories: List[str]
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
@abc.abstractmethod
def _is_data_file(self, data: Tuple[str, BinaryIO]) -> Optional[int]:
pass
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
f"https://www.cs.toronto.edu/~kriz/{self._FILE_NAME}",
sha256=self._SHA256,
)
]
def _unpickle(self, data: Tuple[str, io.BytesIO]) -> Dict[str, Any]:
_, file = data
content = cast(Dict[str, Any], pickle.load(file, encoding="latin1"))
file.close()
return content
def _prepare_sample(self, data: Tuple[np.ndarray, int]) -> Dict[str, Any]:
image_array, category_idx = data
return dict(
image=Image(image_array),
label=Label(category_idx, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, self._is_data_file)
dp = Mapper(dp, self._unpickle)
dp = CifarFileReader(dp, labels_key=self._LABELS_KEY)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 50_000 if self._split == "train" else 10_000
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", self._META_FILE_NAME))
dp = Mapper(dp, self._unpickle)
return cast(List[str], next(iter(dp))[self._CATEGORIES_KEY])
@register_info("cifar10")
def _cifar10_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("cifar10"))
@register_dataset("cifar10")
class Cifar10(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-10-python.tar.gz"
_SHA256 = "6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
_LABELS_KEY = "labels"
_META_FILE_NAME = "batches.meta"
_CATEGORIES_KEY = "label_names"
_categories = _cifar10_info()["categories"]
def _is_data_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name.startswith("data" if self._split == "train" else "test")
@register_info("cifar100")
def _cifar100_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("cifar100"))
@register_dataset("cifar100")
class Cifar100(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-100-python.tar.gz"
_SHA256 = "85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
_LABELS_KEY = "fine_labels"
_META_FILE_NAME = "meta"
_CATEGORIES_KEY = "fine_label_names"
_categories = _cifar100_info()["categories"]
def _is_data_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name == self._split
|
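# A hypothetical usage sketch (assumes the CIFAR archive is available under
# ./data; the prototype Dataset behaves like an iterable datapipe of dicts):
dataset = Cifar10("./data", split="train")
sample = next(iter(dataset))
print(sample["image"].shape, sample["label"])  # 3x32x32 image plus a Label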
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
# frozen_stages must be in range(-1, len(arch_setting) + 1)
CSPDarknet(frozen_stages=6)
with pytest.raises(AssertionError):
# out_indices must be in range(len(arch_setting) + 1)
CSPDarknet(out_indices=[6])
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = CSPDarknet(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test CSPDarknet with norm_eval=True
model = CSPDarknet(norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test CSPDarknet-P5 forward with widen_factor=1.0
model = CSPDarknet(arch='P5', widen_factor=1.0, out_indices=range(0, 5))
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet-P5 forward with widen_factor=0.5
model = CSPDarknet(arch='P5', widen_factor=0.5, out_indices=range(0, 5))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 64, 56, 56))
assert feat[2].shape == torch.Size((1, 128, 28, 28))
assert feat[3].shape == torch.Size((1, 256, 14, 14))
assert feat[4].shape == torch.Size((1, 512, 7, 7))
# Test CSPDarknet-P6 forward with widen_factor=1.5
model = CSPDarknet(
arch='P6',
widen_factor=1.5,
out_indices=range(0, 6),
spp_kernal_sizes=(3, 5, 7))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 320, 320)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 96, 160, 160))
assert feat[1].shape == torch.Size((1, 192, 80, 80))
assert feat[2].shape == torch.Size((1, 384, 40, 40))
assert feat[3].shape == torch.Size((1, 768, 20, 20))
assert feat[4].shape == torch.Size((1, 1152, 10, 10))
assert feat[5].shape == torch.Size((1, 1536, 5, 5))
# Test CSPDarknet forward with dict(type='ReLU')
model = CSPDarknet(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet with BatchNorm forward
model = CSPDarknet(widen_factor=1.0, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet with custom arch forward
arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
[224, 512, 1, True, False]]
model = CSPDarknet(
arch_ovewrite=arch_ovewrite,
widen_factor=1.0,
out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 56, 56, 56))
assert feat[2].shape == torch.Size((1, 224, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
|
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
# frozen_stages must be in range(-1, len(arch_setting) + 1)
CSPDarknet(frozen_stages=6)
with pytest.raises(AssertionError):
# out_indices must be in range(len(arch_setting) + 1)
CSPDarknet(out_indices=[6])
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = CSPDarknet(frozen_stages=frozen_stages)
model.init_weights()
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test CSPDarknet with norm_eval=True
model = CSPDarknet(norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test CSPDarknet-P5 forward with widen_factor=1.0
model = CSPDarknet(arch='P5', widen_factor=1.0, out_indices=range(0, 5))
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet-P5 forward with widen_factor=0.5
model = CSPDarknet(arch='P5', widen_factor=0.5, out_indices=range(0, 5))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 64, 56, 56))
assert feat[2].shape == torch.Size((1, 128, 28, 28))
assert feat[3].shape == torch.Size((1, 256, 14, 14))
assert feat[4].shape == torch.Size((1, 512, 7, 7))
# Test CSPDarknet-P6 forward with widen_factor=1.5
model = CSPDarknet(
arch='P6',
widen_factor=1.5,
out_indices=range(0, 6),
spp_kernal_sizes=(3, 5, 7))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 320, 320)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 96, 160, 160))
assert feat[1].shape == torch.Size((1, 192, 80, 80))
assert feat[2].shape == torch.Size((1, 384, 40, 40))
assert feat[3].shape == torch.Size((1, 768, 20, 20))
assert feat[4].shape == torch.Size((1, 1152, 10, 10))
assert feat[5].shape == torch.Size((1, 1536, 5, 5))
# Test CSPDarknet forward with dict(type='ReLU')
model = CSPDarknet(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet with BatchNorm forward
model = CSPDarknet(widen_factor=1.0, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 64, 112, 112))
assert feat[1].shape == torch.Size((1, 128, 56, 56))
assert feat[2].shape == torch.Size((1, 256, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
assert feat[4].shape == torch.Size((1, 1024, 7, 7))
# Test CSPDarknet with custom arch forward
arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
[224, 512, 1, True, False]]
model = CSPDarknet(
arch_ovewrite=arch_ovewrite,
widen_factor=1.0,
out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 56, 56, 56))
assert feat[2].shape == torch.Size((1, 224, 28, 28))
assert feat[3].shape == torch.Size((1, 512, 14, 14))
|
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import fastapi.testclient
import pytest
import pytest_mock
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
app = fastapi.FastAPI()
app.include_router(backend.server.v2.library.routes.router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware():
"""Override auth middleware for testing"""
return {"sub": "test-user-id"}
def override_get_user_id():
"""Override get_user_id for testing"""
return "test-user-id"
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
override_auth_middleware
)
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
mocked_value = [
backend.server.v2.library.model.LibraryAgent(
id="test-agent-1",
version=1,
is_active=True,
name="Test Agent 1",
description="Test Description 1",
isCreatedByUser=True,
input_schema={"type": "object", "properties": {}},
output_schema={"type": "object", "properties": {}},
),
backend.server.v2.library.model.LibraryAgent(
id="test-agent-2",
version=1,
is_active=True,
name="Test Agent 2",
description="Test Description 2",
isCreatedByUser=False,
input_schema={"type": "object", "properties": {}},
output_schema={"type": "object", "properties": {}},
),
]
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.return_value = mocked_value
response = client.get("/agents")
assert response.status_code == 200
data = [
backend.server.v2.library.model.LibraryAgent.model_validate(agent)
for agent in response.json()
]
assert len(data) == 2
assert data[0].id == "test-agent-1"
assert data[0].isCreatedByUser is True
assert data[1].id == "test-agent-2"
assert data[1].isCreatedByUser is False
mock_db_call.assert_called_once_with("test-user-id")
def test_get_library_agents_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.side_effect = Exception("Test error")
response = client.get("/agents")
assert response.status_code == 500
mock_db_call.assert_called_once_with("test-user-id")
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_success(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.return_value = None
response = client.post("/agents/test-version-id")
assert response.status_code == 201
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
@pytest.mark.skip(reason="Mocker Not implemented")
def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.side_effect = Exception("Test error")
response = client.post("/agents/test-version-id")
assert response.status_code == 500
assert response.json()["detail"] == "Failed to add agent to library"
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
|
import autogpt_libs.auth.depends
import autogpt_libs.auth.middleware
import fastapi
import fastapi.testclient
import pytest_mock
import backend.server.v2.library.db
import backend.server.v2.library.model
import backend.server.v2.library.routes
app = fastapi.FastAPI()
app.include_router(backend.server.v2.library.routes.router)
client = fastapi.testclient.TestClient(app)
def override_auth_middleware():
"""Override auth middleware for testing"""
return {"sub": "test-user-id"}
def override_get_user_id():
"""Override get_user_id for testing"""
return "test-user-id"
app.dependency_overrides[autogpt_libs.auth.middleware.auth_middleware] = (
override_auth_middleware
)
app.dependency_overrides[autogpt_libs.auth.depends.get_user_id] = override_get_user_id
def test_get_library_agents_success(mocker: pytest_mock.MockFixture):
mocked_value = [
backend.server.v2.library.model.LibraryAgent(
id="test-agent-1",
version=1,
is_active=True,
name="Test Agent 1",
description="Test Description 1",
isCreatedByUser=True,
input_schema={"type": "object", "properties": {}},
output_schema={"type": "object", "properties": {}},
),
backend.server.v2.library.model.LibraryAgent(
id="test-agent-2",
version=1,
is_active=True,
name="Test Agent 2",
description="Test Description 2",
isCreatedByUser=False,
input_schema={"type": "object", "properties": {}},
output_schema={"type": "object", "properties": {}},
),
]
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
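    # mocker.patch (from pytest-mock) replaces the real DB accessor for the
    # duration of this test, so the route serves the canned agents above
    # without touching a database.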
mock_db_call.return_value = mocked_value
response = client.get("/agents")
assert response.status_code == 200
data = [
backend.server.v2.library.model.LibraryAgent.model_validate(agent)
for agent in response.json()
]
assert len(data) == 2
assert data[0].id == "test-agent-1"
assert data[0].isCreatedByUser is True
assert data[1].id == "test-agent-2"
assert data[1].isCreatedByUser is False
mock_db_call.assert_called_once_with("test-user-id")
def test_get_library_agents_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.get_library_agents")
mock_db_call.side_effect = Exception("Test error")
response = client.get("/agents")
assert response.status_code == 500
mock_db_call.assert_called_once_with("test-user-id")
def test_add_agent_to_library_success(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.return_value = None
response = client.post("/agents/test-version-id")
assert response.status_code == 201
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
def test_add_agent_to_library_error(mocker: pytest_mock.MockFixture):
mock_db_call = mocker.patch("backend.server.v2.library.db.add_agent_to_library")
mock_db_call.side_effect = Exception("Test error")
response = client.post("/agents/test-version-id")
assert response.status_code == 500
assert response.json()["detail"] == "Failed to add agent to library"
mock_db_call.assert_called_once_with(
store_listing_version_id="test-version-id", user_id="test-user-id"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import read_base
with read_base():
from ...config.py_config.test_base_variables import *
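    # Inside read_base(), the starred import pulls every variable defined in
    # the base config into this config's namespace, replacing the classic
    # `_base_` string-based inheritance mechanism.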
|
# Copyright (c) OpenMMLab. All rights reserved.
if '_base_':
from ...config.py_config.test_base_variables import *
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Dict
def get_metric_value(indicator: str, metrics: Dict) -> Any:
"""Get the metric value specified by an indicator, which can be either a
metric name or a full name with evaluator prefix.
Args:
indicator (str): The metric indicator, which can be the metric name
(e.g. 'AP') or the full name with prefix (e.g. 'COCO/AP')
metrics (dict): The evaluation results output by the evaluator
Returns:
Any: The specified metric value
"""
if '/' in indicator:
# The indicator is a full name
if indicator in metrics:
return metrics[indicator]
else:
raise ValueError(
f'The indicator "{indicator}" can not match any metric in '
f'{list(metrics.keys())}')
else:
# The indicator is metric name without prefix
matched = [k for k in metrics.keys() if k.split('/')[-1] == indicator]
if not matched:
raise ValueError(
f'The indicator {indicator} can not match any metric in '
f'{list(metrics.keys())}')
elif len(matched) > 1:
raise ValueError(f'The indicator "{indicator}" matches multiple '
f'metrics {matched}')
else:
return metrics[matched[0]]
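# A minimal usage sketch (hypothetical metrics dicts) covering the three
# branches above:
#   get_metric_value('COCO/AP', {'COCO/AP': 0.42})            # -> 0.42 (full name)
#   get_metric_value('AP', {'COCO/AP': 0.42, 'loss': 0.1})    # -> 0.42 (suffix match)
#   get_metric_value('AP', {'COCO/AP': 0.4, 'VOC/AP': 0.3})   # ValueError: ambiguous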
|
from typing import Any, Dict
def get_metric_value(indicator: str, metrics: Dict) -> Any:
"""Get the metric value specified by an indicator, which can be either a
metric name or a full name with evaluator prefix.
Args:
indicator (str): The metric indicator, which can be the metric name
(e.g. 'AP') or the full name with prefix (e.g. 'COCO/AP')
metrics (dict): The evaluation results output by the evaluator
Returns:
Any: The specified metric value
"""
if '/' in indicator:
# The indicator is a full name
if indicator in metrics:
return metrics[indicator]
else:
raise ValueError(
f'The indicator "{indicator}" can not match any metric in '
f'{list(metrics.keys())}')
else:
# The indicator is metric name without prefix
matched = [k for k in metrics.keys() if k.split('/')[-1] == indicator]
if not matched:
raise ValueError(
f'The indicator {indicator} can not match any metric in '
f'{list(metrics.keys())}')
elif len(matched) > 1:
raise ValueError(f'The indicator "{indicator}" matches multiple '
f'metrics {matched}')
else:
return metrics[matched[0]]
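# Example (hypothetical values): with metrics = {'COCO/bbox_mAP': 0.38},
# both get_metric_value('COCO/bbox_mAP', metrics) and
# get_metric_value('bbox_mAP', metrics) return 0.38.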
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio import AudioNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
    Bytes that store audio data and that can be loaded into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[AudioNdArray, int]:
"""
Load the Audio from the AudioBytes into an AudioNdArray
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioNdArray, AudioUrl
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[AudioNdArray]
bytes_: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes_ = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes_.load()
# Note this is equivalent to do
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, AudioNdArray)
```
---
:return: tuple of an AudioNdArray representing the audio bytes content,
and an integer representing the frame rate.
"""
pydub = import_library('pydub', raise_error=True) # noqa: F841
from pydub import AudioSegment
segment = AudioSegment.from_file(io.BytesIO(self))
        # Extract the raw samples as a NumPy integer array
samples = np.array(segment.get_array_of_samples())
# Normalise float32 array so that values are between -1.0 and +1.0
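        # e.g. 16-bit audio has sample_width == 2, so the divisor is
        # 2 ** 15 == 32768, mapping int16 samples in [-32768, 32767]
        # onto [-1.0, ~1.0)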
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return parse_obj_as(AudioNdArray, samples_norm), segment.frame_rate
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
Bytes that store an audio and that can be load into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[np.ndarray, int]:
"""
Load the Audio from the bytes into a numpy.ndarray Audio tensor
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioUrl, NdArray, AudioBytes
import numpy as np
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[NdArray]
bytes: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes.load()
# Note this is equivalent to do
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, np.ndarray)
```
---
        :return: tuple of an np.ndarray representing the audio content,
            and an integer representing the frame rate.
"""
if TYPE_CHECKING:
import pydub
else:
pydub = import_library('pydub', raise_error=True)
segment = pydub.AudioSegment.from_file(io.BytesIO(self))
        # Extract the raw samples as a NumPy integer array
samples = np.array(segment.get_array_of_samples())
# Normalise float32 array so that values are between -1.0 and +1.0
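        # sample_width is in bytes, so 8 * sample_width - 1 is the sign-bit
        # position; the divisor equals the magnitude of the most negative
        # representable sample.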
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return samples_norm, segment.frame_rate
|