input (string, 33–5k chars) | output (string, 32–5k chars) |
---|---|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_google_colab,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
logger = get_logger(__name__)
def check_min_version(min_version):
    """Check that the installed version of `diffusers` is at least `min_version`."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace diffusers (see "
                "`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(error_message)
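# Illustrative usage (sketch, not part of the original file): example scripts call this
# helper near the top of the script with the version they were written against; the
# version string below is hypothetical.
#
#   check_min_version("0.27.0.dev0")
#
# A "dev" suffix in the pinned version signals that a source install is required.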
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_google_colab,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
logger = get_logger(__name__)
def check_min_version(min_version):
    """Check that the installed version of `diffusers` is at least `min_version`."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace diffusers (see "
                "`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(error_message)
|
import warnings
from typing import List, Optional
import torchaudio
from . import utils
# TODO: Once legacy global backend is removed, move this to torchaudio.__init__
def _init_backend():
    torchaudio.info = utils.get_info_func()
    torchaudio.load = utils.get_load_func()
    torchaudio.save = utils.get_save_func()
def list_audio_backends() -> List[str]:
    return list(utils.get_available_backends().keys())
# Temporary until global backend is removed
def get_audio_backend() -> Optional[str]:
    warnings.warn("I/O Dispatcher is enabled. There is no global audio backend.", stacklevel=2)
    return None
# Temporary until global backend is removed
def set_audio_backend(_: Optional[str]):
    warnings.warn("I/O Dispatcher is enabled. set_audio_backend is a no-op", stacklevel=2)
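# Illustrative usage of the dispatched functions (sketch, not part of the original file;
# assumes a hypothetical local file "sample.wav" and at least one installed backend):
#
#   import torchaudio
#   print(torchaudio.list_audio_backends())          # e.g. ["ffmpeg", "soundfile"]
#   metadata = torchaudio.info("sample.wav")
#   waveform, sample_rate = torchaudio.load("sample.wav")
#   torchaudio.save("copy.wav", waveform, sample_rate)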
|
from .utils import get_info_func, get_load_func, get_save_func
info = get_info_func()
load = get_load_func()
save = get_save_func()
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
A point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points on the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes = mmdoc.point_cloud.url.load_bytes()
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
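# Illustrative coercion behaviour (sketch, not part of the original file): `validate`
# lets a plain URL string or a raw tensor be used wherever a PointCloud3D is expected.
#
#   pc = PointCloud3D.validate('https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
#   pc = PointCloud3D.validate(np.zeros((100, 3)))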
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
A point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points on the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes = mmdoc.point_cloud.url.load_bytes()
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
"""
Using rmm on a single node device
=================================
"""
import rmm
from sklearn.datasets import make_classification
import xgboost as xgb
# Initialize RMM pool allocator
rmm.reinitialize(pool_allocator=True)
# Optionally force XGBoost to use RMM for all GPU memory allocation, see ./README.md
# xgb.set_config(use_rmm=True)
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
dtrain = xgb.DMatrix(X, label=y)
params = {
"max_depth": 8,
"eta": 0.01,
"objective": "multi:softprob",
"num_class": 3,
"tree_method": "hist",
"device": "cuda",
}
# XGBoost will automatically use the RMM pool allocator
bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])
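# Illustrative follow-up (sketch, not part of the original script): prediction on the
# trained booster goes through the same RMM-backed GPU memory pool.
#
#   preds = bst.predict(dtrain)   # shape (n_samples, num_class) for multi:softprob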
|
"""
Using rmm on a single node device
=================================
"""
import rmm
from sklearn.datasets import make_classification
import xgboost as xgb
# Initialize RMM pool allocator
rmm.reinitialize(pool_allocator=True)
# Optionally force XGBoost to use RMM for all GPU memory allocation, see ./README.md
# xgb.set_config(use_rmm=True)
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
dtrain = xgb.DMatrix(X, label=y)
params = {
"max_depth": 8,
"eta": 0.01,
"objective": "multi:softprob",
"num_class": 3,
"tree_method": "hist",
"device": "cuda",
}
# XGBoost will automatically use the RMM pool allocator
bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Anchor Sparsity: Active Dimensions: 105.5, Sparsity Ratio: 0.9965
Model Positive Sparsity: Active Dimensions: 69.8, Sparsity Ratio: 0.9977
Model Negative Sparsity: Active Dimensions: 68.6, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
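# Illustrative follow-up (sketch, not part of the original script): the evaluator returns
# a plain dict of metrics prefixed with the evaluator name, so all values can be inspected.
#
#   for metric_name, metric_value in results.items():
#       print(f"{metric_name}: {metric_value}")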
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model, ".")
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Sparsity Stats Query : Row Non-Zero Mean: 105.4530029296875, Row Sparsity Mean: 0.9965449571609497
Model Sparsity Stats Corpus : Row Non-Zero Mean: 69.18349838256836, Row Sparsity Mean: 0.9977333247661591
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
from typing import TYPE_CHECKING
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseWebhooksManager
# --8<-- [start:WEBHOOK_MANAGERS_BY_NAME]
WEBHOOK_MANAGERS_BY_NAME: dict["ProviderName", type["BaseWebhooksManager"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GithubWebhooksManager,
Slant3DWebhooksManager,
]
}
# --8<-- [end:WEBHOOK_MANAGERS_BY_NAME]
__all__ = ["WEBHOOK_MANAGERS_BY_NAME"]
|
from typing import TYPE_CHECKING
from .github import GithubWebhooksManager
from .slant3d import Slant3DWebhooksManager
if TYPE_CHECKING:
from .base import BaseWebhooksManager
# --8<-- [start:WEBHOOK_MANAGERS_BY_NAME]
WEBHOOK_MANAGERS_BY_NAME: dict[str, type["BaseWebhooksManager"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GithubWebhooksManager,
Slant3DWebhooksManager,
]
}
# --8<-- [end:WEBHOOK_MANAGERS_BY_NAME]
__all__ = ["WEBHOOK_MANAGERS_BY_NAME"]
|
from llama_index.vector_stores.redis.base import RedisVectorStore, TokenEscaper
__all__ = ["RedisVectorStore", "TokenEscaper"]
|
from llama_index.vector_stores.redis.base import RedisVectorStore
__all__ = ["RedisVectorStore"]
|
from __future__ import annotations
from .MLMTransformer import MLMTransformer
from .SparseAutoEncoder import SparseAutoEncoder
from .SparseStaticEmbedding import SparseStaticEmbedding
from .SpladePooling import SpladePooling
__all__ = ["SparseAutoEncoder", "MLMTransformer", "SpladePooling", "SparseStaticEmbedding"]
|
from __future__ import annotations
from .IDF import IDF
from .MLMTransformer import MLMTransformer
from .SparseAutoEncoder import SparseAutoEncoder
from .SpladePooling import SpladePooling
__all__ = ["SparseAutoEncoder", "MLMTransformer", "SpladePooling", "IDF"]
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
@property
def should_exit(self) -> bool:
return self.server.should_exit
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import E2BDataAnalysisTool
from langchain_community.tools.e2b_data_analysis.tool import (
E2BDataAnalysisToolArguments,
UploadedFile,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UploadedFile": "langchain_community.tools.e2b_data_analysis.tool",
"E2BDataAnalysisToolArguments": "langchain_community.tools.e2b_data_analysis.tool",
"E2BDataAnalysisTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)
__all__ = [
"E2BDataAnalysisTool",
"E2BDataAnalysisToolArguments",
"UploadedFile",
]
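# Illustrative behaviour (sketch, not part of the original module): importing any of the
# names above from this shim routes through __getattr__, which emits a deprecation
# warning and returns the class from langchain_community. The import path below is an
# assumption and may not match the real module location.
#
#   from langchain.tools.e2b_data_analysis.tool import E2BDataAnalysisTool  # DeprecationWarning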
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import E2BDataAnalysisTool
from langchain_community.tools.e2b_data_analysis.tool import (
E2BDataAnalysisToolArguments,
UploadedFile,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"UploadedFile": "langchain_community.tools.e2b_data_analysis.tool",
"E2BDataAnalysisToolArguments": "langchain_community.tools.e2b_data_analysis.tool",
"E2BDataAnalysisTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)
__all__ = [
"UploadedFile",
"E2BDataAnalysisToolArguments",
"E2BDataAnalysisTool",
]
|
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
# Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
if header[1] != "path":
raise ValueError(f"expect `header[1]` to be 'path', but got {header[1]}")
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for *CommonVoice* :cite:`ardila2020common`.
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
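# Illustrative usage (sketch, not part of the original file; assumes a CommonVoice release
# already extracted to the hypothetical folder "./CommonVoice/en", containing the tsv
# files and the "clips" directory):
#
#   dataset = COMMONVOICE("./CommonVoice/en", tsv="validated.tsv")
#   waveform, sample_rate, metadata = dataset[0]
#   print(metadata["sentence"], sample_rate)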
|
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
# Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
if header[1] != "path":
raise ValueError(f"expect `header[1]` to be 'path', but got {header[1]}")
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for *CommonVoice* [:footcite:`ardila2020common`].
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
|
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
# TODO: find a better way to collect image_level_labels
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances', 'image_level_labels'))
]
train_dataloader = dict(
batch_size=2,
num_workers=0, # workers_per_gpu > 0 may cause out-of-memory errors
persistent_workers=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=0,
persistent_workers=False,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/validation-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/validation/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/validation-image-metas.pkl',
image_level_ann_file='annotations/validation-'
'annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='OpenImagesMetric',
iou_thrs=0.5,
ioa_thrs=0.5,
use_group_of=True,
get_supercategory=True)
test_evaluator = val_evaluator
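# Illustrative usage (sketch, not part of the original config): detector configs usually
# pull a dataset file like this one in through `_base_` inheritance; the relative path
# below is an assumption.
#
#   _base_ = ['../_base_/datasets/openimages_detection.py']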
|
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
# TODO: find a better way to collect image_level_labels
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances', 'image_level_labels'))
]
train_dataloader = dict(
batch_size=2,
num_workers=0, # workers_per_gpu > 0 may cause out-of-memory errors
persistent_workers=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=0,
persistent_workers=False,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/validation-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/validation/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/validation-image-metas.pkl',
image_level_ann_file='annotations/validation-'
'annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='OpenImagesMetric',
iou_thr=0.5,
ioa_thr=0.5,
use_group_of=True,
get_supercategory=True)
test_evaluator = val_evaluator
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
from mmdet.registry import DATA_SAMPLERS
from mmdet.utils import get_device
@DATA_SAMPLERS.register_module()
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
device = get_device()
self.seed = sync_random_seed(seed, device)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
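# Illustrative usage (sketch, not part of the original file; assumes the distributed
# process group is already initialized so num_replicas and rank can be inferred):
#
#   sampler = DistributedSampler(dataset, shuffle=True, seed=42)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=2, sampler=sampler)
#   for epoch in range(num_epochs):   # num_epochs is hypothetical
#       sampler.set_epoch(epoch)      # makes shuffling differ between epochs
#       for batch in loader:
#           ...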
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
from mmdet.utils import get_device
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
device = get_device()
self.seed = sync_random_seed(seed, device)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
import pytest
from backend.data import db
from backend.executor.scheduler import SchedulerClient
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(loop_scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
user_id=test_user.id,
)
scheduler = get_service_client(SchedulerClient)
schedules = await scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule = await scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
input_credentials={},
)
assert schedule
schedules = await scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[0].cron == "0 0 * * *"
await scheduler.delete_schedule(schedule.id, user_id=test_user.id)
schedules = await scheduler.get_execution_schedules(
test_graph.id, user_id=test_user.id
)
assert len(schedules) == 0
|
import pytest
from backend.data import db
from backend.executor.scheduler import SchedulerClient
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(loop_scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
user_id=test_user.id,
)
scheduler = get_service_client(SchedulerClient)
schedules = await scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule = await scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule
schedules = await scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[0].cron == "0 0 * * *"
await scheduler.delete_schedule(schedule.id, user_id=test_user.id)
schedules = await scheduler.get_execution_schedules(
test_graph.id, user_id=test_user.id
)
assert len(schedules) == 0
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod_args
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
'--protocol',
protocol,
]
)
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['http'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
def _create_worker_runtime(port, name='', executor=None):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
'--protocol',
protocol,
]
)
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['http'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import FileChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FileChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)
__all__ = [
"FileChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import FileChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FileChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)
__all__ = [
"FileChatMessageHistory",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx,
get_k_for_topk)
from .pytorch2onnx import (build_model_from_cfg,
generate_inputs_and_wrap_model,
preprocess_example_input)
__all__ = [
'build_model_from_cfg', 'generate_inputs_and_wrap_model',
'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx',
'dynamic_clip_for_onnx'
]
|
from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx,
get_k_for_topk)
from .pytorch2onnx import (build_model_from_cfg,
generate_inputs_and_wrap_model,
preprocess_example_input)
__all__ = [
'build_model_from_cfg', 'generate_inputs_and_wrap_model',
'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx',
'dynamic_clip_for_onnx'
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
:class:`LaserEncoder` is an encoder based on Facebook Research's LASER
(Language-Agnostic SEntence Representations) to compute multilingual
sentence embeddings: https://github.com/facebookresearch/LASER
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
language: str = 'en',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE``.
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
:param language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param batch_size: size of each batch
:param device: Device string ('cpu'/'cuda'/'cuda:2')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.device = device
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.language = language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': self.device == 'cpu'},
)
self.device = torch.device(device)
self.model.bpeSentenceEmbedding.encoder.encoder.to(self.device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_path``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs is None:
return
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=parameters.get('batch_size', self.batch_size),
)
for document_batch in document_batches_generator:
text_batch = document_batch.texts
language = parameters.get('language', self.language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
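# Illustrative usage (sketch, not part of the original file; instantiating the executor
# with the default download_data=True fetches the LASER model files first):
#
#   encoder = LaserEncoder(language='en', device='cpu')
#   docs = DocumentArray([Document(text='hello world'), Document(text='how are you?')])
#   encoder.encode(docs, parameters={})
#   print(docs[0].embedding.shape)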
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
:class:`LaserEncoder` is an encoder based on Facebook Research's LASER
(Language-Agnostic SEntence Representations) to compute multilingual
sentence embeddings: https://github.com/facebookresearch/LASER
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
language: str = 'en',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE``.
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
:param language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param batch_size: size of each batch
:param device: Device string ('cpu'/'cuda'/'cuda:2')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.device = device
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.language = language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': self.device == 'cpu'},
)
self.device = torch.device(device)
self.model.bpeSentenceEmbedding.encoder.encoder.to(self.device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_path``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs is None:
return
document_batches_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
require_attr='text',
)
for document_batch in document_batches_generator:
text_batch = document_batch.texts
language = parameters.get('language', self.language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
|
from keras.src import backend
def is_in_jax_tracing_scope(x=None):
    if backend.backend() == "jax":
        if x is None:
            x = backend.numpy.ones(())
        for c in x.__class__.__mro__:
            if c.__name__ == "Tracer" and c.__module__.startswith("jax"):
                return True
    return False
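# Illustrative behaviour (sketch, not part of the original file; assumes the JAX backend
# is active): called eagerly the helper returns False, while inside a jax.jit-traced
# function its tensor arguments are jax Tracer instances, so it returns True.
#
#   is_in_jax_tracing_scope()   # -> False outside of tracing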
|
from keras.src import backend
def is_in_jax_tracing_scope(x=None):
    if backend.backend() == "jax":
        if x is None:
            x = backend.numpy.ones(())
        if x.__class__.__name__ == "DynamicJaxprTracer":
            return True
    return False
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from typing import Dict
import pytest
import torch
import numpy as np
from torchvision.models.mobilenetv2 import model_urls
from PIL import Image
from jina import DocumentArray, Document
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def mobilenet_weights(tmpdir: str) -> str:
weights_file = os.path.join(tmpdir, 'w.pth')
torch.hub.download_url_to_file(
url=model_urls['mobilenet_v2'], dst=weights_file, progress=False
)
return weights_file
@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
return DocumentArray(
[Document(blob=np.ones((10, 10, 3), dtype=np.uint8)) for _ in range(11)]
)
@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(chunks=[Document(blob=np.ones((10, 10, 3), dtype=np.uint8))])
for _ in range(11)
]
)
@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(
chunks=[
Document(blob=np.ones((10, 10, 3), dtype=np.uint8))
for _ in range(11)
]
)
]
)
]
)
@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
def get_path(file_name_no_suffix: str) -> str:
return os.path.join(test_dir, 'test_data', file_name_no_suffix + '.png')
image_dict = {
file_name: np.array(Image.open(get_path(file_name)))[:, :, 0:3]
for file_name in ['airplane', 'banana1', 'banana2', 'satellite', 'studio']
}
return image_dict
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from typing import Dict
import pytest
import torch
import numpy as np
from torchvision.models.mobilenetv2 import model_urls
from PIL import Image
from jina import DocumentArray, Document
@pytest.fixture()
def test_dir() -> str:
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def mobilenet_weights(tmpdir: str) -> str:
weights_file = os.path.join(tmpdir, 'w.pth')
torch.hub.download_url_to_file(url=model_urls['mobilenet_v2'], dst=weights_file)
return weights_file
@pytest.fixture()
def docs_with_blobs() -> DocumentArray:
return DocumentArray([
Document(blob=np.ones((10, 10, 3), dtype=np.uint8)) for _ in range(11)
])
@pytest.fixture()
def docs_with_chunk_blobs() -> DocumentArray:
return DocumentArray([
Document(
chunks=[Document(blob=np.ones((10, 10, 3), dtype=np.uint8))]) for _ in range(11)
])
@pytest.fixture()
def docs_with_chunk_chunk_blobs() -> DocumentArray:
return DocumentArray([
Document(
chunks=[Document(
chunks=[Document(blob=np.ones((10, 10, 3), dtype=np.uint8)) for _ in range(11)])])
])
@pytest.fixture()
def test_images(test_dir: str) -> Dict[str, np.ndarray]:
def get_path(file_name_no_suffix: str) -> str:
return os.path.join(test_dir, 'test_data', file_name_no_suffix + '.png')
image_dict = {
file_name: np.array(Image.open(get_path(file_name)))[:, :, 0:3] for file_name in [
'airplane', 'banana1', 'banana2', 'satellite', 'studio'
]
}
return image_dict
|
from ._dsp import (
adsr_envelope,
exp_sigmoid,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from ._rir import simulate_rir_ism
from .functional import barkscale_fbanks, chroma_filterbank
__all__ = [
"adsr_envelope",
"exp_sigmoid",
"barkscale_fbanks",
"chroma_filterbank",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"sinc_impulse_response",
"simulate_rir_ism",
]
|
from ._dsp import (
adsr_envelope,
exp_sigmoid,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from ._rir import simulate_rir_ism
from .functional import barkscale_fbanks
__all__ = [
"adsr_envelope",
"exp_sigmoid",
"barkscale_fbanks",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"sinc_impulse_response",
"simulate_rir_ism",
]
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
WeaviateArrayType = TypeVar(
'WeaviateArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
def _find_similar_vectors(
self, query: 'WeaviateArrayType', limit=10, filter: Optional[Dict] = None
):
query = to_numpy_array(query)
is_all_zero = np.all(query == 0)
if is_all_zero:
query = query + EPSILON
query_dict = {'vector': query}
query_builder = (
self._client.query.get(self._class_name, '_serialized')
.with_additional(['id', 'certainty'])
.with_limit(limit)
.with_near_vector(query_dict)
)
if filter:
query_builder = query_builder.with_where(filter)
results = query_builder.do()
docs = []
if 'errors' in results:
errors = '\n'.join(map(lambda error: error['message'], results['errors']))
raise ValueError(
f'find failed, please check your filter query. Errors: \n{errors}'
)
found_results = (
results.get('data', {}).get('Get', {}).get(self._class_name, []) or []
)
# The serialized document is stored in results['data']['Get'][self._class_name]
for result in found_results:
doc = Document.from_base64(result['_serialized'], **self._serialize_config)
certainty = result['_additional']['certainty']
doc.scores['weaviate_certainty'] = NamedScore(value=certainty)
if certainty is None:
doc.scores['cosine_similarity'] = NamedScore(value=None)
else:
doc.scores['cosine_similarity'] = NamedScore(value=2 * certainty - 1)
doc.tags['wid'] = result['_additional']['id']
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'WeaviateArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query vector(s) in any array type supported by Weaviate: np.ndarray, tensorflow.Tensor, torch.Tensor, or Sequence[float]
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
Note: Weaviate returns `certainty` values. To get cosine similarities one needs to use `cosine_sim = 2*certainty - 1` as explained here:
https://www.semi.technology/developers/weaviate/current/more-resources/faq.html#q-how-do-i-get-the-cosine-similarity-from-weaviates-certainty
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
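# Hedged standalone sketch of the certainty <-> cosine relation referenced in the
# docstrings above (no Weaviate client required): certainty = (1 + cosine) / 2,
# hence cosine_similarity = 2 * certainty - 1.
if __name__ == "__main__":
    def certainty_to_cosine(certainty: float) -> float:
        return 2 * certainty - 1

    assert certainty_to_cosine(1.0) == 1.0   # identical vectors
    assert certainty_to_cosine(0.5) == 0.0   # orthogonal vectors
    assert certainty_to_cosine(0.0) == -1.0  # opposite vectors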
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
WeaviateArrayType = TypeVar(
'WeaviateArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
def _find_similar_vectors(self, query: 'WeaviateArrayType', limit=10):
query = to_numpy_array(query)
is_all_zero = np.all(query == 0)
if is_all_zero:
query = query + EPSILON
query_dict = {'vector': query}
results = (
self._client.query.get(
self._class_name,
['_serialized', '_additional {certainty}', '_additional {id}'],
)
.with_limit(limit)
.with_near_vector(query_dict)
.do()
)
docs = []
# The serialized document is stored in results['data']['Get'][self._class_name]
for result in results.get('data', {}).get('Get', {}).get(self._class_name, []):
doc = Document.from_base64(result['_serialized'], **self._serialize_config)
certainty = result['_additional']['certainty']
doc.scores['weaviate_certainty'] = NamedScore(value=certainty)
if certainty is None:
doc.scores['cosine_similarity'] = NamedScore(value=None)
else:
doc.scores['cosine_similarity'] = NamedScore(value=2 * certainty - 1)
doc.tags['wid'] = result['_additional']['id']
docs.append(doc)
return DocumentArray(docs)
def _find(
self, query: 'WeaviateArrayType', limit: int = 10, **kwargs
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query vector(s) in any array type supported by Weaviate: np.ndarray, tensorflow.Tensor, torch.Tensor, or Sequence[float]
:param limit: number of retrieved items
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
Note: Weaviate returns `certainty` values. To get cosine similarities one needs to use `cosine_sim = 2*certainty - 1` as explained here:
https://www.semi.technology/developers/weaviate/current/more-resources/faq.html#q-how-do-i-get-the-cosine-similarity-from-weaviates-certainty
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit)
closest_docs.append(da)
return closest_docs
|
from typing import Optional
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
from typing import Optional
import numpy as np
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize'
]
|
import multiprocessing
import time
import grpc
import pytest
import requests
from jina import __version__
from jina.constants import __jina_env__
from jina.proto import jina_pb2, jina_pb2_grpc
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.servers import BaseServer
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
from tests.helper import _generate_pod_args
from .test_runtimes import _create_gateway_runtime, _create_head_runtime
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = [port]
args.name = name
if executor:
args.uses = executor
with AsyncNewLoopRuntime(args, req_handler_cls=WorkerRequestHandler) as runtime:
runtime.run_forever()
def _create_worker(port):
# create a single worker runtime
p = multiprocessing.Process(target=_create_worker_runtime, args=(port,))
p.start()
time.sleep(0.1)
return p
def _create_gateway(port, graph, pod_addr, protocol):
    # create a single gateway runtime
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph, pod_addr, port, protocol),
)
p.start()
time.sleep(0.1)
return p
def _create_head(port, connection_list_dict, polling='ANY'):
p = multiprocessing.Process(
target=_create_head_runtime, args=(port, connection_list_dict, 'head', polling)
)
p.start()
time.sleep(0.1)
return p
@pytest.mark.parametrize('runtime', ['head', 'worker', 'gateway'])
def test_jina_info_grpc_based_runtimes(runtime, port_generator):
port = port_generator()
connection_list_dict = {}
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
if runtime == 'head':
p = _create_head(port, connection_list_dict)
elif runtime == 'gateway':
p = _create_gateway(port, graph_description, pod_addresses, 'grpc')
else:
p = _create_worker(port)
try:
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
channel = grpc.insecure_channel(f'localhost:{port}')
stub = jina_pb2_grpc.JinaInfoRPCStub(channel)
res = stub._status(
jina_pb2.google_dot_protobuf_dot_empty__pb2.Empty(),
)
assert res.jina['jina'] == __version__
for env_var in __jina_env__:
assert env_var in res.envs
except Exception:
assert False
finally:
p.terminate()
p.join()
@pytest.mark.parametrize('protocol', ['http', 'websocket'])
def test_jina_info_gateway_http(protocol, port_generator):
port = port_generator()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
p = _create_gateway(port, graph_description, pod_addresses, protocol)
try:
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
x = requests.get(f'http://localhost:{port}/status')
resp = x.json()
assert 'jina' in resp
assert 'envs' in resp
assert resp['jina']['jina'] == __version__
for env_var in __jina_env__:
assert env_var in resp['envs']
except Exception:
assert False
finally:
p.terminate()
p.join()
|
import multiprocessing
import time
import grpc
import pytest
import requests
from jina import __version__
from jina.constants import __jina_env__
from jina.proto import jina_pb2, jina_pb2_grpc
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod_args
from .test_runtimes import _create_gateway_runtime, _create_head_runtime
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_worker(port):
# create a single worker runtime
p = multiprocessing.Process(target=_create_worker_runtime, args=(port,))
p.start()
time.sleep(0.1)
return p
def _create_gateway(port, graph, pod_addr, protocol):
    # create a single gateway runtime
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph, pod_addr, port, protocol),
)
p.start()
time.sleep(0.1)
return p
def _create_head(port, connection_list_dict, polling='ANY'):
p = multiprocessing.Process(
target=_create_head_runtime, args=(port, connection_list_dict, 'head', polling)
)
p.start()
time.sleep(0.1)
return p
@pytest.mark.parametrize('runtime', ['head', 'worker', 'gateway'])
def test_jina_info_grpc_based_runtimes(runtime, port_generator):
port = port_generator()
connection_list_dict = {}
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
if runtime == 'head':
p = _create_head(port, connection_list_dict)
elif runtime == 'gateway':
p = _create_gateway(port, graph_description, pod_addresses, 'grpc')
else:
p = _create_worker(port)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
channel = grpc.insecure_channel(f'localhost:{port}')
stub = jina_pb2_grpc.JinaInfoRPCStub(channel)
res = stub._status(
jina_pb2.google_dot_protobuf_dot_empty__pb2.Empty(),
)
assert res.jina['jina'] == __version__
for env_var in __jina_env__:
assert env_var in res.envs
except Exception:
assert False
finally:
p.terminate()
p.join()
@pytest.mark.parametrize('protocol', ['http', 'websocket'])
def test_jina_info_gateway_http(protocol, port_generator):
port = port_generator()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{port}"]}}'
p = _create_gateway(port, graph_description, pod_addresses, protocol)
try:
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
x = requests.get(f'http://localhost:{port}/status')
resp = x.json()
assert 'jina' in resp
assert 'envs' in resp
assert resp['jina']['jina'] == __version__
for env_var in __jina_env__:
assert env_var in resp['envs']
except Exception:
assert False
finally:
p.terminate()
p.join()
|
# coding=utf-8
# Copyright 2025 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import DiffusionPipeline
from diffusers.utils.testing_utils import backend_empty_cache, require_torch_gpu, slow, torch_device
@require_torch_gpu
@slow
class QuantCompileTests(unittest.TestCase):
quantization_config = None
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
torch.compiler.reset()
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
torch.compiler.reset()
def _init_pipeline(self, quantization_config, torch_dtype):
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers",
quantization_config=quantization_config,
torch_dtype=torch_dtype,
)
return pipe
def _test_torch_compile(self, quantization_config, torch_dtype=torch.bfloat16):
pipe = self._init_pipeline(quantization_config, torch_dtype).to("cuda")
        # important: compile with fullgraph=True so graph breaks surface as errors
pipe.transformer.compile(fullgraph=True)
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
def _test_torch_compile_with_cpu_offload(self, quantization_config, torch_dtype=torch.bfloat16):
pipe = self._init_pipeline(quantization_config, torch_dtype)
pipe.enable_model_cpu_offload()
pipe.transformer.compile()
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
def _test_torch_compile_with_group_offload(self, quantization_config, torch_dtype=torch.bfloat16):
torch._dynamo.config.cache_size_limit = 10000
pipe = self._init_pipeline(quantization_config, torch_dtype)
group_offload_kwargs = {
"onload_device": torch.device("cuda"),
"offload_device": torch.device("cpu"),
"offload_type": "leaf_level",
"use_stream": True,
"non_blocking": True,
}
pipe.transformer.enable_group_offload(**group_offload_kwargs)
pipe.transformer.compile()
for name, component in pipe.components.items():
if name != "transformer" and isinstance(component, torch.nn.Module):
if torch.device(component.device).type == "cpu":
component.to("cuda")
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import DiffusionPipeline
from diffusers.utils.testing_utils import backend_empty_cache, require_torch_gpu, slow, torch_device
@require_torch_gpu
@slow
class QuantCompileTests(unittest.TestCase):
quantization_config = None
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
torch.compiler.reset()
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
torch.compiler.reset()
def _init_pipeline(self, quantization_config, torch_dtype):
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers",
quantization_config=quantization_config,
torch_dtype=torch_dtype,
)
return pipe
def _test_torch_compile(self, quantization_config, torch_dtype=torch.bfloat16):
pipe = self._init_pipeline(quantization_config, torch_dtype).to("cuda")
        # important: compile with fullgraph=True so graph breaks surface as errors
pipe.transformer.compile(fullgraph=True)
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
def _test_torch_compile_with_cpu_offload(self, quantization_config, torch_dtype=torch.bfloat16):
pipe = self._init_pipeline(quantization_config, torch_dtype)
pipe.enable_model_cpu_offload()
pipe.transformer.compile()
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
def _test_torch_compile_with_group_offload(self, quantization_config, torch_dtype=torch.bfloat16):
torch._dynamo.config.cache_size_limit = 10000
pipe = self._init_pipeline(quantization_config, torch_dtype)
group_offload_kwargs = {
"onload_device": torch.device("cuda"),
"offload_device": torch.device("cpu"),
"offload_type": "leaf_level",
"use_stream": True,
"non_blocking": True,
}
pipe.transformer.enable_group_offload(**group_offload_kwargs)
pipe.transformer.compile()
for name, component in pipe.components.items():
if name != "transformer" and isinstance(component, torch.nn.Module):
if torch.device(component.device).type == "cpu":
component.to("cuda")
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fused_semantic_head import FusedSemanticHead
@MODELS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fused_semantic_head import FusedSemanticHead
@MODELS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetSemanticHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
|
"""python scripts/update_mypy_ruff.py"""
import glob
import tomllib
from pathlib import Path
import toml
import subprocess
import re
ROOT_DIR = Path(__file__).parents[1]
def main():
for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
if "libs/cli/" in path:
continue
print(path)
with open(path, "rb") as f:
pyproject = tomllib.load(f)
try:
pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
"^1.10"
)
pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
"^0.5"
)
except KeyError:
continue
with open(path, "w") as f:
toml.dump(pyproject, f)
cwd = "/".join(path.split("/")[:-1])
subprocess.run(
"poetry lock --no-update; poetry install --with lint; poetry run ruff format .; poetry run ruff --fix .",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
completed = subprocess.run(
"poetry lock --no-update; poetry install --with lint, typing; poetry run mypy . --no-color",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
logs = completed.stdout.split("\n")
to_ignore = {}
for l in logs:
if re.match("^(.*)\:(\d+)\: error:.*\[(.*)\]", l):
path, line_no, error_type = re.match(
"^(.*)\:(\d+)\: error:.*\[(.*)\]", l
).groups()
if (path, line_no) in to_ignore:
to_ignore[(path, line_no)].append(error_type)
else:
to_ignore[(path, line_no)] = [error_type]
print(len(to_ignore))
for (error_path, line_no), error_types in to_ignore.items():
all_errors = ", ".join(error_types)
full_path = f"{cwd}/{error_path}"
try:
with open(full_path, "r") as f:
file_lines = f.readlines()
except FileNotFoundError:
continue
file_lines[int(line_no) - 1] = (
file_lines[int(line_no) - 1][:-1] + f" # type: ignore[{all_errors}]\n"
)
with open(full_path, "w") as f:
f.write("".join(file_lines))
subprocess.run(
"poetry lock --no-update; poetry install --with lint; poetry run ruff format .; poetry run ruff --fix .",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
if __name__ == "__main__":
main()
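# Illustrative self-check of the error-parsing regex used in main() above; the path and
# error code in the sample line are hypothetical, not taken from a real mypy run.
_sample = "langchain/chains/base.py:42: error: Incompatible return value type  [return-value]"
_match = re.match(r"^(.*)\:(\d+)\: error:.*\[(.*)\]", _sample)
assert _match and _match.groups() == ("langchain/chains/base.py", "42", "return-value")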
|
"""python scripts/update_mypy_ruff.py"""
import glob
import tomllib
from pathlib import Path
import toml
import subprocess
import re
ROOT_DIR = Path(__file__).parents[1]
def main():
for path in glob.glob(str(ROOT_DIR / "libs/**/pyproject.toml"), recursive=True):
if "libs/cli/" in path:
continue
print(path)
with open(path, "rb") as f:
pyproject = tomllib.load(f)
try:
pyproject["tool"]["poetry"]["group"]["typing"]["dependencies"]["mypy"] = (
"^1.10"
)
pyproject["tool"]["poetry"]["group"]["lint"]["dependencies"]["ruff"] = (
"^0.5"
)
except KeyError:
continue
with open(path, "w") as f:
toml.dump(pyproject, f)
cwd = "/".join(path.split("/")[:-1])
subprocess.run(
"poetry lock --no-update; poetry install --with lint; poetry run ruff format .; poetry run ruff --select I --fix .",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
completed = subprocess.run(
"poetry lock --no-update; poetry install --with lint, typing; poetry run mypy . --no-color",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
logs = completed.stdout.split("\n")
to_ignore = {}
for l in logs:
if re.match("^(.*)\:(\d+)\: error:.*\[(.*)\]", l):
path, line_no, error_type = re.match(
"^(.*)\:(\d+)\: error:.*\[(.*)\]", l
).groups()
if (path, line_no) in to_ignore:
to_ignore[(path, line_no)].append(error_type)
else:
to_ignore[(path, line_no)] = [error_type]
print(len(to_ignore))
for (error_path, line_no), error_types in to_ignore.items():
all_errors = ", ".join(error_types)
full_path = f"{cwd}/{error_path}"
try:
with open(full_path, "r") as f:
file_lines = f.readlines()
except FileNotFoundError:
continue
file_lines[int(line_no) - 1] = (
file_lines[int(line_no) - 1][:-1] + f" # type: ignore[{all_errors}]\n"
)
with open(full_path, "w") as f:
f.write("".join(file_lines))
subprocess.run(
"poetry lock --no-update; poetry install --with lint; poetry run ruff format .; poetry run ruff --select I --fix .",
cwd=cwd,
shell=True,
capture_output=True,
text=True,
)
if __name__ == "__main__":
main()
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(
N_SAMPLES, HOP_LENGTH
) # 3000: number of frames in a mel spectrogram input
N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions have stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH) # 10ms per audio frame
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN) # 20ms per audio token
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(
dim=axis, index=torch.arange(length, device=array.device)
)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(
os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(
audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS
):
"""
    Compute the log-Mel spectrogram of an audio waveform.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
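# Minimal usage sketch (not part of the original module): a synthetic 1 s sine wave,
# so no ffmpeg is needed; assumes the packaged "assets/mel_filters.npz" file is present.
if __name__ == "__main__":
    t = torch.arange(SAMPLE_RATE, dtype=torch.float32) / SAMPLE_RATE
    waveform = torch.sin(2 * np.pi * 440.0 * t)  # one second of a 440 Hz tone
    waveform = pad_or_trim(waveform)             # pad to N_SAMPLES (30 s of audio)
    mel = log_mel_spectrogram(waveform)
    print(mel.shape)                             # torch.Size([80, 3000])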
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2  # the initial convolutions have stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH) # 100 mel frames in 1s (10ms each)
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN) # 50 audio tokens in 1s (20ms each)
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length, device=array.device))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
    Compute the log-Mel spectrogram of an audio waveform.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
from typing import Any, Union
from langchain_core.utils.json import parse_json_markdown
from langchain.evaluation.schema import StringEvaluator
class JsonSchemaEvaluator(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input (bool): Whether the evaluator requires input.
requires_reference (bool): Whether the evaluator requires reference.
evaluation_name (str): The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **kwargs: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Args:
kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError:
msg = (
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
raise ImportError(msg)
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
elif hasattr(node, "schema") and callable(getattr(node, "schema")):
# Pydantic model
return getattr(node, "schema")()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {
"score": True,
}
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
def _evaluate_strings(
self,
prediction: Union[str, Any],
input: Union[str, Any] = None,
reference: Union[str, Any] = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
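# Hedged usage sketch mirroring the docstring example above (requires the `jsonschema`
# package to be installed); the predicted JSON conforms to the schema, so score is True.
if __name__ == "__main__":
    evaluator = JsonSchemaEvaluator()
    result = evaluator.evaluate_strings(
        prediction='{"name": "John", "age": 30}',
        reference={
            "type": "object",
            "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
        },
    )
    print(result)  # {'score': True}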
|
from typing import Any, Union
from langchain_core.utils.json import parse_json_markdown
from langchain.evaluation.schema import StringEvaluator
class JsonSchemaEvaluator(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input (bool): Whether the evaluator requires input.
requires_reference (bool): Whether the evaluator requires reference.
evaluation_name (str): The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **kwargs: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Args:
kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError:
raise ImportError(
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
elif hasattr(node, "schema") and callable(getattr(node, "schema")):
# Pydantic model
return getattr(node, "schema")()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {
"score": True,
}
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
def _evaluate_strings(
self,
prediction: Union[str, Any],
input: Union[str, Any] = None,
reference: Union[str, Any] = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
|
_base_ = './mask2former_r50_8xb2-lsj-50e_coco-panoptic.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask2former_r50_lsj_8x2_50e_coco-panoptic.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
__all__ = ['reduce', 'reduce_all']
from typing import Dict, List, Optional
from docarray import DocList
def reduce(
left: DocList, right: DocList, left_id_map: Optional[Dict] = None
) -> 'DocList':
"""
Reduces left and right DocList into one DocList in-place.
Changes are applied to the left DocList.
    Reducing 2 DocLists consists of adding Documents from the second DocList
    to the first DocList if they do not already exist there.
If a Document exists in both DocLists (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocLists are also reduced in the same way.
:param left: First DocList to be reduced. Changes will be applied to it
in-place
:param right: Second DocList to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocList
:return: Reduced DocList
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
left.append(doc)
return left
def reduce_all(docs: List[DocList]) -> DocList:
"""
Reduces a list of DocLists into one DocList.
Changes are applied to the first DocList in-place.
The resulting DocList contains Documents of all DocLists.
If a Document exists (identified by their ID) in many DocLists,
data properties are merged with priority to the left-most
DocLists (that is, if a data attribute is set in a Document
belonging to many DocLists, the attribute value of the left-most
DocList is kept).
Nested DocLists belonging to many DocLists
are also reduced in the same way.
!!! note
- Nested DocLists order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocLists
when applying reduction.
:param docs: List of DocLists to be reduced
:return: the resulting DocList
"""
if len(docs) <= 1:
raise Exception(
'In order to reduce DocLists' ' we should have more than one DocList'
)
left = docs[0]
others = docs[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for other_docs in others:
reduce(left, other_docs, left_id_map)
return left
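# Hedged usage sketch (``MyDoc`` is illustrative, not part of this module): Documents are
# matched by ``id``, so the reduced DocList below contains the ids '1', '2' and '3'.
if __name__ == "__main__":
    from docarray import BaseDoc

    class MyDoc(BaseDoc):
        text: str = ''

    left = DocList[MyDoc]([MyDoc(id='1', text='a'), MyDoc(id='2', text='b')])
    right = DocList[MyDoc]([MyDoc(id='1', text='a-too'), MyDoc(id='3', text='c')])
    merged = reduce(left, right)
    print([doc.id for doc in merged])  # ['1', '2', '3']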
|
__all__ = ['reduce', 'reduce_all']
from typing import Dict, List, Optional
from docarray import DocList
def reduce(
left: DocList, right: DocList, left_id_map: Optional[Dict] = None
) -> 'DocList':
"""
Reduces left and right DocList into one DocList in-place.
Changes are applied to the left DocList.
    Reducing 2 DocLists consists of adding Documents from the second DocList
    to the first DocList if they do not already exist there.
If a Document exists in both DocLists (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocLists are also reduced in the same way.
:param left: First DocList to be reduced. Changes will be applied to it
in-place
:param right: Second DocList to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocList
:return: Reduced DocList
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
left.append(doc)
return left
def reduce_all(docarrays: List[DocList]) -> DocList:
"""
Reduces a list of DocLists into one DocList.
Changes are applied to the first DocList in-place.
The resulting DocList contains Documents of all DocLists.
If a Document exists (identified by their ID) in many DocLists,
data properties are merged with priority to the left-most
DocLists (that is, if a data attribute is set in a Document
belonging to many DocLists, the attribute value of the left-most
DocList is kept).
Nested DocLists belonging to many DocLists
are also reduced in the same way.
.. note::
- Nested DocLists order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocLists
when applying reduction.
:param docarrays: List of DocLists to be reduced
:return: the resulting DocList
"""
if len(docarrays) <= 1:
raise Exception(
'In order to reduce DocLists' ' we should have more than one DocList'
)
left = docarrays[0]
others = docarrays[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for docs in others:
reduce(left, docs, left_id_map)
return left
|
import pytest
from jina import Flow
num_calls = 0
@pytest.fixture(scope='function', autouse=True)
def patched_path_import(mocker):
from jina.importer import _path_import
def _wrapped_path_import(absolute_path: str):
global num_calls
num_calls += 1
assert num_calls < 2
return _path_import(absolute_path)
mocker.patch(
'jina.importer._path_import', new_callable=lambda: _wrapped_path_import
)
def test_single_import(patched_path_import):
flow = Flow().add(
uses='ExecutorImportedOnce',
py_modules=['executors/executor_fails_import_twice.py'],
)
with flow:
pass
def test_single_import_metas(patched_path_import):
flow = Flow().add(
uses='ExecutorImportedOnce',
py_modules=['executors/executor_fails_import_twice.py'],
)
with flow:
pass
|
import pytest
from jina import Flow
num_calls = 0
@pytest.fixture(scope='function', autouse=True)
def patched_path_import(mocker):
from jina.importer import _path_import
def _wrapped_path_import(absolute_path: str):
global num_calls
num_calls += 1
assert num_calls < 2
return _path_import(absolute_path)
mocker.patch(
'jina.importer._path_import', new_callable=lambda: _wrapped_path_import
)
def test_single_import(patched_path_import):
flow = Flow().add(
uses='ExecutorImportedOnce',
py_modules=['executors/executor_fails_import_twice.py'],
)
with flow:
pass
def test_single_import_metas(patched_path_import):
flow = Flow().add(
uses='ExecutorImportedOnce',
uses_metas=dict(py_modules=['executors/executor_fails_import_twice.py']),
)
with flow:
pass
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
guide = SparseEncoder("prithivida/Splade_PP_en_v1")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SparseGISTEmbedLoss(model, guide=guide)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
# TODO: Investigate whether this works; the test seems to use a wrong hyperparameter
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseGISTEmbedLoss,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
# Initialize the sparse loss with a guide model
guide = SparseEncoder(
modules=[
MLMTransformer("prithivida/Splade_PP_en_v1"),
SpladePooling(pooling_strategy="max"),
],
device="cuda:0",
)
loss = SparseGISTEmbedLoss(model, guide=guide)
# Create the trainer
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
# Train the model
trainer.train()
|
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py39
@pytest.fixture(
name="client",
params=[
"tutorial008b",
"tutorial008b_an",
pytest.param("tutorial008b_an_py39", marks=needs_py39),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.dependencies.{request.param}")
client = TestClient(mod.app)
return client
def test_get_no_item(client: TestClient):
response = client.get("/items/foo")
assert response.status_code == 404, response.text
assert response.json() == {"detail": "Item not found"}
def test_owner_error(client: TestClient):
response = client.get("/items/plumbus")
assert response.status_code == 400, response.text
assert response.json() == {"detail": "Owner error: Rick"}
def test_get_item(client: TestClient):
response = client.get("/items/portal-gun")
assert response.status_code == 200, response.text
assert response.json() == {"description": "Gun to create portals", "owner": "Rick"}
|
from fastapi.testclient import TestClient
from docs_src.dependencies.tutorial008b import app
client = TestClient(app)
def test_get_no_item():
response = client.get("/items/foo")
assert response.status_code == 404, response.text
assert response.json() == {"detail": "Item not found"}
def test_owner_error():
response = client.get("/items/plumbus")
assert response.status_code == 400, response.text
assert response.json() == {"detail": "Owner error: Rick"}
def test_get_item():
response = client.get("/items/portal-gun")
assert response.status_code == 200, response.text
assert response.json() == {"description": "Gun to create portals", "owner": "Rick"}
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (HorizontalBoxes, bbox2distance,
distance2bbox, get_box_tensor)
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
    This coder encodes gt bboxes (x1, y1, x2, y2) into (left, top, right,
    bottom) distances and decodes them back to the original boxes.
    Args:
        clip_border (bool, optional): Whether to clip objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True, **kwargs):
super().__init__(**kwargs)
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format
is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): A small value to ensure target < max_dis instead of <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
gt_bboxes = get_box_tensor(gt_bboxes)
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or
(B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
bboxes = distance2bbox(points, pred_bboxes, max_shape)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes)
return bboxes
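# Hypothetical usage sketch (not part of the original file), assuming mmdet and torch are
# installed: encode ground-truth boxes into point-to-border distances and decode them back.
# The point and box values are made up.
if __name__ == '__main__':
    import torch
    coder = DistancePointBBoxCoder(clip_border=True)
    points = torch.tensor([[50., 50.], [120., 80.]])          # (N, 2) [x, y] points
    gt_bboxes = torch.tensor([[40., 30., 90., 100.],
                              [100., 60., 160., 120.]])       # (N, 4) xyxy boxes
    distances = coder.encode(points, gt_bboxes, max_dis=100)  # (N, 4) left/top/right/bottom
    decoded = coder.decode(points, distances, max_shape=(200, 200))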
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils.misc import get_box_tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes, bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
    This coder encodes gt bboxes (x1, y1, x2, y2) into (left, top, right,
    bottom) distances and decodes them back to the original boxes.
    Args:
        clip_border (bool, optional): Whether to clip objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True, **kwargs):
super().__init__(**kwargs)
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format
is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): A small value to ensure target < max_dis instead of <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
gt_bboxes = get_box_tensor(gt_bboxes)
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or
(B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
bboxes = distance2bbox(points, pred_bboxes, max_shape)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes)
return bboxes
|
import os
from jina import Executor, requests, DocumentArray
import socket
class TestExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from jina.logging.logger import JinaLogger
self.logger = JinaLogger(self.__class__.__name__)
self._name = self.runtime_args.name
@requests(on='/debug')
def debug(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
key = 'traversed-executors'
for doc in docs:
if key not in doc.tags:
doc.tags[key] = []
traversed = list(doc.tags.get(key))
traversed.append(self._name)
doc.tags[key] = traversed
doc.tags['parallel'] = self.runtime_args.replicas
doc.tags['shards'] = self.runtime_args.shards
doc.tags['shard_id'] = self.runtime_args.shard_id
doc.tags['hostname'] = socket.gethostname()
@requests(on='/env')
def env(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
for doc in docs:
doc.tags['k1'] = os.environ.get('k1')
doc.tags['k2'] = os.environ.get('k2')
doc.tags['JINA_LOG_LEVEL'] = os.environ.get('JINA_LOG_LEVEL')
doc.tags['env'] = {'k1': os.environ.get('k1'), 'k2': os.environ.get('k2')}
doc.tags['SECRET_USERNAME'] = os.environ.get('SECRET_USERNAME')
doc.tags['SECRET_PASSWORD'] = os.environ.get('SECRET_PASSWORD')
@requests(on='/cuda')
def cuda(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
import kubernetes
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
try:
# try loading kube config from disk first
kubernetes.config.load_kube_config()
except kubernetes.config.config_exception.ConfigException:
# if the config could not be read from disk, try loading in cluster config
# this works if we are running inside k8s
kubernetes.config.load_incluster_config()
pods = core_client.list_namespaced_pod('test-gpu') # List[V1Pod]
pod_spec = pods[0].spec # V1PodSpec
pod_container = pod_spec.containers[0] # V1Container
pod_resources = pod_container.resources # V1ResourceRequirements
for doc in docs:
doc.tags['resources']['limits'] = pod_resources.limits
@requests(on='/workspace')
def foo_workspace(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
self.logger.debug(f'Workspace {self.workspace}.')
for doc in docs:
doc.tags['workspace'] = self.workspace
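# Hypothetical local usage sketch (not part of the original file): run the executor in a Flow
# and call the '/debug' endpoint. Assumes a jina 3.x / docarray setup matching the imports above.
if __name__ == '__main__':
    from jina import Flow, Document

    with Flow().add(uses=TestExecutor, name='test_executor') as f:
        responses = f.post(on='/debug', inputs=DocumentArray([Document()]))
        print(responses[0].tags.get('traversed-executors'))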
|
import os
from jina import Executor, requests, DocumentArray
import socket
class TestExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from jina.logging.logger import JinaLogger
self.logger = JinaLogger(self.__class__.__name__)
self._name = self.runtime_args.name
@requests(on='/debug')
def debug(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
key = 'traversed-executors'
for doc in docs:
if key not in doc.tags:
doc.tags[key] = []
traversed = list(doc.tags.get(key))
traversed.append(self._name)
doc.tags[key] = traversed
doc.tags['parallel'] = self.runtime_args.replicas
doc.tags['shards'] = self.runtime_args.shards
doc.tags['shard_id'] = self.runtime_args.shard_id
doc.tags['hostname'] = socket.gethostname()
@requests(on='/env')
def env(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
for doc in docs:
doc.tags['k1'] = os.environ.get('k1')
doc.tags['k2'] = os.environ.get('k2')
doc.tags['JINA_LOG_LEVEL'] = os.environ.get('JINA_LOG_LEVEL')
doc.tags['env'] = {'k1': os.environ.get('k1'), 'k2': os.environ.get('k2')}
@requests(on='/cuda')
def cuda(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
import kubernetes
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
try:
# try loading kube config from disk first
kubernetes.config.load_kube_config()
except kubernetes.config.config_exception.ConfigException:
# if the config could not be read from disk, try loading in cluster config
# this works if we are running inside k8s
kubernetes.config.load_incluster_config()
pods = core_client.list_namespaced_pod('test-gpu') # List[V1Pod]
pod_spec = pods[0].spec # V1PodSpec
pod_container = pod_spec.containers[0] # V1Container
pod_resources = pod_container.resources # V1ResourceRequirements
for doc in docs:
doc.tags['resources']['limits'] = pod_resources.limits
@requests(on='/workspace')
def foo_workspace(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
self.logger.debug(f'Workspace {self.workspace}.')
for doc in docs:
doc.tags['workspace'] = self.workspace
|
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
@staticmethod
def _parse_index_ids_from_bulk_info(
accumulated_info: List[Dict],
) -> Dict[str, List[int]]:
"""Parse ids from bulk info of failed send request to ES operation
:param accumulated_info: accumulated info of failed operation
:return: dict containing failed index ids of each operation type
"""
parsed_ids = {}
for info in accumulated_info:
for _op_type in info.keys():
if '_id' in info[_op_type]:
if _op_type not in parsed_ids:
parsed_ids[_op_type] = []
parsed_ids[_op_type].append(info[_op_type]['_id'])
return parsed_ids
def _upload_batch(self, docs: Iterable['Document'], **kwargs) -> List[int]:
requests = [self._document_to_elastic(doc) for doc in docs]
accumulated_info = self._send_requests(requests, **kwargs)
self._refresh(self._config.index_name)
successful_ids = self._parse_index_ids_from_bulk_info(accumulated_info)
if 'index' not in successful_ids:
return []
return successful_ids['index']
def extend(self, docs: Iterable['Document'], **kwargs):
docs = list(docs)
successful_indexed_ids = self._upload_batch(docs, **kwargs)
self._offset2ids.extend(
[_id for _id in successful_indexed_ids if _id not in self._offset2ids.ids]
)
if len(successful_indexed_ids) != len(docs):
doc_ids = [doc.id for doc in docs]
failed_index_ids = set(doc_ids) - set(successful_indexed_ids)
            err_msg = f'failed to add Documents with ids: {failed_index_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
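# Hypothetical usage sketch (not part of the original file): these sequence-like methods are
# exercised through a DocumentArray configured with the Elasticsearch storage backend, which
# requires a reachable Elasticsearch instance; the index name and n_dim below are made up.
if __name__ == '__main__':
    from docarray import DocumentArray

    da = DocumentArray(storage='elasticsearch', config={'index_name': 'demo', 'n_dim': 3})
    da.extend([Document(embedding=[1.0, 0.0, 0.0]), Document(embedding=[0.0, 1.0, 0.0])])
    print(len(da), da[0].id in da)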
|
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
@staticmethod
def _parse_index_ids_from_bulk_info(
accumulated_info: List[Dict],
) -> Dict[str, List[int]]:
"""Parse ids from bulk info of failed send request to ES operation
:param accumulated_info: accumulated info of failed operation
:return: dict containing failed index ids of each operation type
"""
parsed_ids = {}
for info in accumulated_info:
for _op_type in info.keys():
if '_id' in info[_op_type]:
if _op_type not in parsed_ids:
parsed_ids[_op_type] = []
parsed_ids[_op_type].append(info[_op_type]['_id'])
return parsed_ids
def _upload_batch(self, docs: Iterable['Document']) -> List[int]:
batch = []
accumulated_info = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
accumulated_info.extend(self._send_requests(batch))
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
accumulated_info.extend(self._send_requests(batch))
self._refresh(self._config.index_name)
successful_ids = self._parse_index_ids_from_bulk_info(accumulated_info)
if 'index' not in successful_ids:
return []
return successful_ids['index']
def extend(self, docs: Iterable['Document']):
docs = list(docs)
successful_indexed_ids = self._upload_batch(docs)
self._offset2ids.extend(
[_id for _id in successful_indexed_ids if _id not in self._offset2ids.ids]
)
if len(successful_indexed_ids) != len(docs):
doc_ids = [doc.id for doc in docs]
failed_index_ids = set(doc_ids) - set(successful_indexed_ids)
            err_msg = f'failed to add Documents with ids: {failed_index_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.image_utils import array_to_img as array_to_img
from keras.src.utils.image_utils import img_to_array as img_to_array
from keras.src.utils.image_utils import load_img as load_img
from keras.src.utils.image_utils import save_img as save_img
from keras.src.utils.image_utils import smart_resize as smart_resize
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.image_utils import smart_resize
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_pure_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_pure_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = datapoints.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
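# Hypothetical usage sketch (not part of the original file), assuming the torchvision prototype
# datapoints / transforms-v2 API imported above is available; the sizes and box values are made up.
if __name__ == "__main__":
    crop = FixedSizeCrop(size=(224, 224))
    image = datapoints.Image(torch.randint(0, 256, (3, 300, 400), dtype=torch.uint8))
    boxes = datapoints.BoundingBoxes(
        torch.tensor([[10.0, 20.0, 150.0, 180.0]]),
        format=datapoints.BoundingBoxFormat.XYXY,
        canvas_size=(300, 400),
    )
    label = Label(torch.tensor([1]))
    out_image, out_boxes, out_label = crop(image, boxes, label)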
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_pure_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_pure_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = datapoints.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
from functools import lru_cache
import numpy as np
import torch
try:
import triton
import triton.language as tl
except ImportError:
raise RuntimeError("triton import failed; try `pip install --pre triton`")
@triton.jit
def dtw_kernel(
cost, trace, x, x_stride, cost_stride, trace_stride, N, M, BLOCK_SIZE: tl.constexpr
):
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < M
for k in range(1, N + M + 1): # k = i + j
tl.debug_barrier()
p0 = cost + (k - 1) * cost_stride
p1 = cost + k * cost_stride
p2 = cost + k * cost_stride + 1
c0 = tl.load(p0 + offsets, mask=mask)
c1 = tl.load(p1 + offsets, mask=mask)
c2 = tl.load(p2 + offsets, mask=mask)
x_row = tl.load(x + (k - 1) * x_stride + offsets, mask=mask, other=0)
cost_row = x_row + tl.minimum(tl.minimum(c0, c1), c2)
cost_ptr = cost + (k + 1) * cost_stride + 1
tl.store(cost_ptr + offsets, cost_row, mask=mask)
trace_ptr = trace + (k + 1) * trace_stride + 1
tl.store(trace_ptr + offsets, 2, mask=mask & (c2 <= c0) & (c2 <= c1))
tl.store(trace_ptr + offsets, 1, mask=mask & (c1 <= c0) & (c1 <= c2))
tl.store(trace_ptr + offsets, 0, mask=mask & (c0 <= c1) & (c0 <= c2))
@lru_cache(maxsize=None)
def median_kernel(filter_width: int):
@triton.jit
def kernel(
y, x, x_stride, y_stride, BLOCK_SIZE: tl.constexpr
): # x.shape[-1] == filter_width
row_idx = tl.program_id(0)
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < y_stride
x_ptr = x + row_idx * x_stride # noqa: F841
y_ptr = y + row_idx * y_stride
LOAD_ALL_ROWS_HERE # noqa: F821
BUBBLESORT_HERE # noqa: F821
tl.store(y_ptr + offsets, MIDDLE_ROW_HERE, mask=mask) # noqa: F821
kernel = triton.JITFunction(kernel.fn)
kernel.src = kernel.src.replace(
" LOAD_ALL_ROWS_HERE",
"\n".join(
[
f" row{i} = tl.load(x_ptr + offsets + {i}, mask=mask)"
for i in range(filter_width)
]
),
)
kernel.src = kernel.src.replace(
" BUBBLESORT_HERE",
"\n\n".join(
[
"\n\n".join(
[
"\n".join(
[
f" smaller = tl.where(row{j} < row{j + 1}, row{j}, row{j + 1})",
f" larger = tl.where(row{j} > row{j + 1}, row{j}, row{j + 1})",
f" row{j} = smaller",
f" row{j + 1} = larger",
]
)
for j in range(filter_width - i - 1)
]
)
for i in range(filter_width // 2 + 1)
]
),
)
kernel.src = kernel.src.replace("MIDDLE_ROW_HERE", f"row{filter_width // 2}")
return kernel
def median_filter_cuda(x: torch.Tensor, filter_width: int):
"""Apply a median filter of given width along the last dimension of x"""
slices = x.contiguous().unfold(-1, filter_width, 1)
grid = np.prod(slices.shape[:-2])
kernel = median_kernel(filter_width)
y = torch.empty_like(slices[..., 0])
BLOCK_SIZE = 1 << (y.stride(-2) - 1).bit_length()
kernel[(grid,)](y, x, x.stride(-2), y.stride(-2), BLOCK_SIZE=BLOCK_SIZE)
return y
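# Hypothetical usage sketch (not part of the original file): requires an NVIDIA GPU with triton
# installed. The input sizes are made up; the output's last dimension shrinks to
# x.shape[-1] - filter_width + 1 because of the unfolding above.
if __name__ == "__main__":
    if torch.cuda.is_available():
        x = torch.randn(4, 128, device="cuda")
        smoothed = median_filter_cuda(x, filter_width=7)
        print(smoothed.shape)  # torch.Size([4, 122])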
|
import math
import numpy as np
import torch
from functools import lru_cache
try:
import triton
import triton.language as tl
except ImportError:
raise RuntimeError("triton import failed; try `pip install --pre triton`")
@triton.jit
def dtw_kernel(cost, trace, x, x_stride, cost_stride, trace_stride, N, M, BLOCK_SIZE: tl.constexpr):
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < M
for k in range(1, N + M + 1): # k = i + j
tl.debug_barrier()
p0 = cost + (k - 1) * cost_stride
p1 = cost + k * cost_stride
p2 = cost + k * cost_stride + 1
c0 = tl.load(p0 + offsets, mask=mask)
c1 = tl.load(p1 + offsets, mask=mask)
c2 = tl.load(p2 + offsets, mask=mask)
x_row = tl.load(x + (k - 1) * x_stride + offsets, mask=mask, other=0)
cost_row = x_row + tl.minimum(tl.minimum(c0, c1), c2)
cost_ptr = cost + (k + 1) * cost_stride + 1
tl.store(cost_ptr + offsets, cost_row, mask=mask)
trace_ptr = trace + (k + 1) * trace_stride + 1
tl.store(trace_ptr + offsets, 2, mask=mask & (c2 <= c0) & (c2 <= c1))
tl.store(trace_ptr + offsets, 1, mask=mask & (c1 <= c0) & (c1 <= c2))
tl.store(trace_ptr + offsets, 0, mask=mask & (c0 <= c1) & (c0 <= c2))
@lru_cache(maxsize=None)
def median_kernel(filter_width: int):
@triton.jit
def kernel(y, x, x_stride, y_stride, BLOCK_SIZE: tl.constexpr): # x.shape[-1] == filter_width
row_idx = tl.program_id(0)
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < y_stride
x_ptr = x + row_idx * x_stride
y_ptr = y + row_idx * y_stride
LOAD_ALL_ROWS_HERE
BUBBLESORT_HERE
tl.store(y_ptr + offsets, MIDDLE_ROW_HERE, mask=mask)
kernel = triton.JITFunction(kernel.fn)
kernel.src = kernel.src.replace(" LOAD_ALL_ROWS_HERE", "\n".join([
f" row{i} = tl.load(x_ptr + offsets + {i}, mask=mask)"
for i in range(filter_width)
]))
kernel.src = kernel.src.replace(" BUBBLESORT_HERE", "\n\n".join([
"\n\n".join([
"\n".join([
f" smaller = tl.where(row{j} < row{j + 1}, row{j}, row{j + 1})",
f" larger = tl.where(row{j} > row{j + 1}, row{j}, row{j + 1})",
f" row{j} = smaller",
f" row{j + 1} = larger",
])
for j in range(filter_width - i - 1)
])
for i in range(filter_width // 2 + 1)
]))
kernel.src = kernel.src.replace("MIDDLE_ROW_HERE", f"row{filter_width // 2}")
return kernel
def median_filter_cuda(x: torch.Tensor, filter_width: int):
"""Apply a median filter of given width along the last dimension of x"""
slices = x.contiguous().unfold(-1, filter_width, 1)
grid = np.prod(slices.shape[:-2])
kernel = median_kernel(filter_width)
y = torch.empty_like(slices[..., 0])
BLOCK_SIZE = 1 << (y.stride(-2) - 1).bit_length()
kernel[(grid,)](y, x, x.stride(-2), y.stride(-2), BLOCK_SIZE=BLOCK_SIZE)
return y
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
        times=4,  # set this anywhere from 2 (50e) to 16 (400e) to scale the total training length.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
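# A hypothetical usage sketch (not part of the original config): a concrete model config would
# normally inherit this dataset/schedule file via `_base_`, e.g. (file names are made up):
# _base_ = [
#     '../common/ssj_270k_coco-instance.py',
#     '../_base_/models/mask-rcnn_r50_fpn.py',
# ]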
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# uncomment the code below to use a different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
        times=4,  # set this anywhere from 2 (50e) to 16 (400e) to scale the total training length.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_better_profanity_available,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_cosmos_guardrail_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_hpu_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_nltk_available,
is_note_seq_available,
is_onnx_available,
is_opencv_available,
is_optimum_quanto_available,
is_optimum_quanto_version,
is_peft_available,
is_peft_version,
is_pytorch_retinaface_available,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchao_version,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .remote_utils import remote_decode
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
state_dict_all_zero,
)
from .typing_utils import _get_detailed_type, _is_valid_type
logger = get_logger(__name__)
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
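# Hypothetical usage sketch (not part of the original file): example scripts typically call
# check_min_version right after importing diffusers so they fail fast on stale installs.
# The version string below is made up.
if __name__ == "__main__":
    check_min_version("0.20.0")  # raises ImportError when the installed diffusers is older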
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_hpu_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_opencv_available,
is_optimum_quanto_available,
is_optimum_quanto_version,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchao_version,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .remote_utils import remote_decode
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
state_dict_all_zero,
)
from .typing_utils import _get_detailed_type, _is_valid_type
logger = get_logger(__name__)
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
__all__ = ['get_root_logger', 'collect_env']
|
from .collect_env import collect_env
from .logger import get_root_logger
__all__ = ['get_root_logger', 'collect_env']
|
"""Prompts for comparing the outputs of two models for a given question.
This prompt is used to compare two responses and evaluate which one best follows the instructions
and answers the question. The prompt is based on the paper from
Zheng et al. https://arxiv.org/abs/2306.05685
""" # noqa: E501
from langchain_core.prompts.chat import ChatPromptTemplate
SYSTEM_MESSAGE = 'Please act as an impartial judge and evaluate the quality \
of the responses provided by two AI assistants to the user question displayed below. \
You should choose the assistant that follows the user\'s instructions \
and answers the user\'s question better. \
Your evaluation should consider factors such as the \
helpfulness, relevance, accuracy, depth, creativity, \
and level of detail of their responses. \
Begin your evaluation by comparing the two responses and provide a short explanation. \
Avoid any position biases and ensure that the order in which \
the responses were presented does not influence your decision. \
Do not allow the length of the responses to influence your evaluation. \
Do not favor certain names of the assistants. Be as objective as possible. \
After providing your explanation, output your final verdict by strictly following \
this format: "[[A]]" if assistant A is better, "[[B]]" if assistant B is better, \
and "[[C]]" for a tie.'
CRITERIA_INSTRUCTIONS = (
"For this evaluation, you should primarily consider the following criteria:\n"
)
COMPARISON_TEMPLATE = ChatPromptTemplate.from_messages(
[
("system", SYSTEM_MESSAGE),
(
"human",
"{criteria}[User Question]\n{input}\n\n\
[The Start of Assistant A's Answer]\n{prediction}\n\
[The End of Assistant A's Answer]\
\n\n[The Start of Assistant B's Answer]\n{prediction_b}\n\
[The End of Assistant B's Answer]",
),
]
)
COMPARISON_TEMPLATE_WITH_REFERENCE = ChatPromptTemplate.from_messages(
[
("system", SYSTEM_MESSAGE),
(
"human",
"{criteria}\n\nTo help you evaluate the responses, \
here is a reference answer to the user's question:\n\
{reference}\
[User Question]\n{input}\n\n\
[The Start of Assistant A's Answer]\n{prediction}\n\
[The End of Assistant A's Answer]\
\n\n[The Start of Assistant B's Answer]\n{prediction_b}\n\
[The End of Assistant B's Answer]",
),
]
)
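# Hypothetical usage sketch (not part of the original file): render the pairwise-comparison
# prompt into chat messages before sending it to a judge model; all field values are made up.
if __name__ == "__main__":
    messages = COMPARISON_TEMPLATE.format_messages(
        criteria="helpfulness and factual accuracy. ",
        input="What causes tides?",
        prediction="Tides are mainly caused by the gravitational pull of the Moon.",
        prediction_b="Tides are caused by wind blowing across the ocean.",
    )
    print(messages[1].content)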
|
"""Prompts for comparing the outputs of two models for a given question.
This prompt is used to compare two responses and evaluate which one best follows the instructions
and answers the question. The prompt is based on the paper from
Zheng et al. https://arxiv.org/abs/2306.05685
"""
# flake8: noqa
from langchain_core.prompts.chat import ChatPromptTemplate
SYSTEM_MESSAGE = 'Please act as an impartial judge and evaluate the quality \
of the responses provided by two AI assistants to the user question displayed below. \
You should choose the assistant that follows the user\'s instructions \
and answers the user\'s question better. \
Your evaluation should consider factors such as the \
helpfulness, relevance, accuracy, depth, creativity, \
and level of detail of their responses. \
Begin your evaluation by comparing the two responses and provide a short explanation. \
Avoid any position biases and ensure that the order in which \
the responses were presented does not influence your decision. \
Do not allow the length of the responses to influence your evaluation. \
Do not favor certain names of the assistants. Be as objective as possible. \
After providing your explanation, output your final verdict by strictly following \
this format: "[[A]]" if assistant A is better, "[[B]]" if assistant B is better, \
and "[[C]]" for a tie.'
CRITERIA_INSTRUCTIONS = (
"For this evaluation, you should primarily consider the following criteria:\n"
)
COMPARISON_TEMPLATE = ChatPromptTemplate.from_messages(
[
("system", SYSTEM_MESSAGE),
(
"human",
"{criteria}[User Question]\n{input}\n\n\
[The Start of Assistant A's Answer]\n{prediction}\n\
[The End of Assistant A's Answer]\
\n\n[The Start of Assistant B's Answer]\n{prediction_b}\n\
[The End of Assistant B's Answer]",
),
]
)
COMPARISON_TEMPLATE_WITH_REFERENCE = ChatPromptTemplate.from_messages(
[
("system", SYSTEM_MESSAGE),
(
"human",
"{criteria}\n\nTo help you evaluate the responses, \
here is a reference answer to the user's question:\n\
{reference}\
[User Question]\n{input}\n\n\
[The Start of Assistant A's Answer]\n{prediction}\n\
[The End of Assistant A's Answer]\
\n\n[The Start of Assistant B's Answer]\n{prediction_b}\n\
[The End of Assistant B's Answer]",
),
]
)
|
from torchaudio import ( # noqa: F401
_extension,
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
from torchaudio.backend import get_audio_backend, list_audio_backends, set_audio_backend
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
from torchaudio import _extension # noqa: F401
from torchaudio import (
io,
compliance,
datasets,
functional,
models,
pipelines,
kaldi_io,
utils,
sox_effects,
transforms,
)
from torchaudio.backend import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.9.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.8.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmdet.registry import TASK_UTILS
MATCH_COST = TASK_UTILS
def build_match_cost(cfg, default_args=None):
"""Builder of IoU calculator."""
warnings.warn('``build_match_cost`` would be deprecated soon, please use '
'``mmdet.registry.TASK_UTILS.build()`` ')
return TASK_UTILS.build(cfg, default_args=default_args)
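# Hypothetical usage sketch (not part of the original file): build a registered match cost from
# a config dict, as done in DETR-style assigners. The IoUCost settings below are illustrative.
if __name__ == '__main__':
    iou_cost = build_match_cost(dict(type='IoUCost', iou_mode='giou', weight=2.0))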
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg
MATCH_COST = Registry('Match Cost')
def build_match_cost(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, MATCH_COST, default_args)
|
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
assert header[1] == "path"
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for *CommonVoice* [:footcite:`ardila2020common`].
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
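# A minimal usage sketch, not part of the original module (assumption: ``path/to/CommonVoice``
# is an illustrative local CommonVoice download whose root contains ``train.tsv`` and a
# ``clips/`` folder with the referenced mp3 files).
if __name__ == "__main__":
    dataset = COMMONVOICE(root="path/to/CommonVoice", tsv="train.tsv")
    waveform, sample_rate, metadata = dataset[0]
    print(waveform.shape, sample_rate, metadata["sentence"])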
|
import csv
import os
from pathlib import Path
from typing import List, Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
def load_commonvoice_item(
line: List[str], header: List[str], path: str, folder_audio: str, ext_audio: str
) -> Tuple[Tensor, int, Dict[str, str]]:
    # Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
assert header[1] == "path"
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for *CommonVoice* [:footcite:`ardila2020common`].
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self, root: Union[str, Path], tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import pytest
import mmengine
def test_timer_init():
timer = mmengine.Timer(start=False)
assert not timer.is_running
timer.start()
assert timer.is_running
timer = mmengine.Timer()
assert timer.is_running
def test_timer_run():
timer = mmengine.Timer()
time.sleep(1)
    # On Windows, the error could be larger than 20ms. More details in
# https://stackoverflow.com/questions/11657734/sleep-for-exact-time-in-python. # noqa: E501
assert abs(timer.since_start() - 1) < 3e-2
time.sleep(1)
assert abs(timer.since_last_check() - 1) < 3e-2
assert abs(timer.since_start() - 2) < 3e-2
timer = mmengine.Timer(False)
with pytest.raises(mmengine.TimerError):
timer.since_start()
with pytest.raises(mmengine.TimerError):
timer.since_last_check()
def test_timer_context(capsys):
with mmengine.Timer():
time.sleep(1)
out, _ = capsys.readouterr()
    # On Windows, the error could be larger than 20ms. More details in
# https://stackoverflow.com/questions/11657734/sleep-for-exact-time-in-python. # noqa: E501
assert abs(float(out) - 1) < 3e-2
with mmengine.Timer(print_tmpl='time: {:.1f}s'):
time.sleep(1)
out, _ = capsys.readouterr()
assert out == 'time: 1.0s\n'
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import pytest
import mmengine
def test_timer_init():
timer = mmengine.Timer(start=False)
assert not timer.is_running
timer.start()
assert timer.is_running
timer = mmengine.Timer()
assert timer.is_running
def test_timer_run():
timer = mmengine.Timer()
time.sleep(1)
assert abs(timer.since_start() - 1) < 1e-2
time.sleep(1)
assert abs(timer.since_last_check() - 1) < 1e-2
assert abs(timer.since_start() - 2) < 1e-2
timer = mmengine.Timer(False)
with pytest.raises(mmengine.TimerError):
timer.since_start()
with pytest.raises(mmengine.TimerError):
timer.since_last_check()
def test_timer_context(capsys):
with mmengine.Timer():
time.sleep(1)
out, _ = capsys.readouterr()
assert abs(float(out) - 1) < 1e-2
with mmengine.Timer(print_tmpl='time: {:.1f}s'):
time.sleep(1)
out, _ = capsys.readouterr()
assert out == 'time: 1.0s\n'
|
import os
import pytest
import requests
from jina import Flow
from tests.helper import (
ProcessExecutor,
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
from tests.unit.yaml.dummy_gateway_get_streamer import DummyGatewayGetStreamer
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-custom-gateway.yml'
)
_dummy_fastapi_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-fastapi-gateway.yml'
)
_flow_with_dummy_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-flow-custom-gateway-nested-config.yml'
)
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
(DummyGateway, {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
DummyGatewayGetStreamer,
{},
{'arg1': None, 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
DummyGateway,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
DummyGatewayGetStreamer,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
DummyGateway,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
DummyGatewayGetStreamer,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_flow_custom_gateway_no_executor(uses, uses_with, expected):
flow = (
Flow().config_gateway(uses=uses, uses_with=uses_with).add(uses=ProcessExecutor)
)
with flow:
_validate_dummy_custom_gateway_response(flow.port, expected)
_validate_custom_gateway_process(
flow.port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
def test_flow_custom_gateway_nested_config():
flow = Flow.load_config(_flow_with_dummy_gateway_yaml_path)
with flow:
_validate_dummy_custom_gateway_response(
flow.port, {'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'}
)
def test_flow_custom_gateway_via_flow_uses_disabled():
uses_with = {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'}
flow = Flow(uses='DummyGateway', uses_with=uses_with)
    # the uses parameter is ignored here and will not be applied to the gateway; therefore, the
    # gateway is just a gRPC gateway
with pytest.raises(requests.ConnectionError):
with flow:
_ = requests.get(f'http://127.0.0.1:{flow.port}/').json()
|
import os
import pytest
import requests
from jina import Flow
from tests.helper import (
ProcessExecutor,
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-custom-gateway.yml'
)
_dummy_fastapi_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-fastapi-gateway.yml'
)
_flow_with_dummy_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-flow-custom-gateway-nested-config.yml'
)
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_flow_custom_gateway_no_executor(uses, uses_with, expected):
flow = (
Flow()
.config_gateway(uses=uses, uses_with=uses_with)
.add(uses='ProcessExecutor')
)
with flow:
_validate_dummy_custom_gateway_response(flow.port, expected)
_validate_custom_gateway_process(
flow.port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
def test_flow_custom_gateway_nested_config():
flow = Flow.load_config(_flow_with_dummy_gateway_yaml_path)
with flow:
_validate_dummy_custom_gateway_response(
flow.port, {'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'}
)
def test_flow_custom_gateway_via_flow_uses_disabled():
uses_with = {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'}
flow = Flow(uses='DummyGateway', uses_with=uses_with)
    # the uses parameter is ignored here and will not be applied to the gateway; therefore, the
    # gateway is just a gRPC gateway
with pytest.raises(requests.ConnectionError):
with flow:
_ = requests.get(f'http://127.0.0.1:{flow.port}/').json()
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
T = TypeVar('T', bound='Image')
try:
import torch
torch_available = True
except ImportError:
torch_available = False
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Image
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
elif isinstance(value, bytes):
            value = cls(bytes=value)
return super().validate(value)
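# A minimal usage sketch, not part of the original module: ``validate`` lets an ``Image``
# field accept a bare URL string, a tensor, or raw bytes (assumption: the URL below is
# illustrative and is not fetched at validation time).
if __name__ == '__main__':
    image_from_url = Image.validate('http://www.jina.ai/image.jpg')
    image_from_tensor = Image.validate(np.zeros((8, 8, 3)))
    assert image_from_url.url is not None
    assert image_from_tensor.tensor is not None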
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
T = TypeVar('T', bound='Image')
try:
import torch
torch_available = True
except ImportError:
torch_available = False
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Image
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
"""
url: Optional[ImageUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
"""Dappier Real Time Search tool spec."""
import os
from typing import Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class DappierRealTimeSearchToolSpec(BaseToolSpec):
"""Dappier Real Time Search tool spec."""
spec_functions = ["search_real_time_data", "search_stock_market_data"]
def __init__(self, api_key: Optional[str] = None) -> None:
"""
Initialize the Dappier Real Time Search tool spec.
To obtain an API key, visit: https://platform.dappier.com/profile/api-keys
"""
from dappier import Dappier
self.api_key = api_key or os.environ.get("DAPPIER_API_KEY")
if not self.api_key:
raise ValueError(
"API key is required. Provide it as a parameter or set DAPPIER_API_KEY in environment variables.\n"
"To obtain an API key, visit: https://platform.dappier.com/profile/api-keys"
)
self.client = Dappier(api_key=self.api_key)
def search_real_time_data(self, query: str) -> str:
"""
Performs a real-time data search.
Args:
query (str): The user-provided input string for retrieving
                real-time Google web search results, including the latest news,
weather, travel, deals and more.
Returns:
str: A response message containing the real-time data results.
"""
ai_model_id = "am_01j0rzq4tvfscrgzwac7jv1p4c"
response = self.client.search_real_time_data(
query=query, ai_model_id=ai_model_id
)
return response.message if response else "No real-time data found."
def search_stock_market_data(self, query: str) -> str:
"""
Performs a stock market data search.
Args:
query (str): The user-provided input string for retrieving
real-time financial news, stock prices, and trades from polygon.io,
with AI-powered insights and up-to-the-minute updates to keep you
informed on all your financial interests.
Returns:
str: A response message containing the stock market data results.
"""
ai_model_id = "am_01j749h8pbf7ns8r1bq9s2evrh"
response = self.client.search_real_time_data(
query=query, ai_model_id=ai_model_id
)
return response.message if response else "No stock market data found."
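# A minimal usage sketch, not part of the original module (assumptions: the ``dappier``
# package is installed, ``DAPPIER_API_KEY`` is set in the environment, and the query
# strings are illustrative only).
if __name__ == "__main__":
    tool_spec = DappierRealTimeSearchToolSpec()
    print(tool_spec.search_real_time_data("latest weather in Berlin"))
    print(tool_spec.search_stock_market_data("AAPL stock price today"))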
|
"""Dappier Real Time Search tool spec."""
import os
from typing import Optional
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class DappierRealTimeSearchToolSpec(BaseToolSpec):
"""Dappier Real Time Search tool spec."""
spec_functions = ["search_real_time_data", "search_stock_market_data"]
def __init__(self, api_key: Optional[str] = None) -> None:
"""Initialize the Dappier Real Time Search tool spec.
To obtain an API key, visit: https://platform.dappier.com/profile/api-keys
"""
from dappier import Dappier
self.api_key = api_key or os.environ.get("DAPPIER_API_KEY")
if not self.api_key:
raise ValueError(
"API key is required. Provide it as a parameter or set DAPPIER_API_KEY in environment variables.\n"
"To obtain an API key, visit: https://platform.dappier.com/profile/api-keys"
)
self.client = Dappier(api_key=self.api_key)
def search_real_time_data(self, query: str) -> str:
"""
Performs a real-time data search.
Args:
query (str): The user-provided input string for retrieving
                real-time Google web search results, including the latest news,
weather, travel, deals and more.
Returns:
str: A response message containing the real-time data results.
"""
ai_model_id = "am_01j0rzq4tvfscrgzwac7jv1p4c"
response = self.client.search_real_time_data(
query=query, ai_model_id=ai_model_id
)
return response.message if response else "No real-time data found."
def search_stock_market_data(self, query: str) -> str:
"""
Performs a stock market data search.
Args:
query (str): The user-provided input string for retrieving
real-time financial news, stock prices, and trades from polygon.io,
with AI-powered insights and up-to-the-minute updates to keep you
informed on all your financial interests.
Returns:
str: A response message containing the stock market data results.
"""
ai_model_id = "am_01j749h8pbf7ns8r1bq9s2evrh"
response = self.client.search_real_time_data(
query=query, ai_model_id=ai_model_id
)
return response.message if response else "No stock market data found."
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...audioclip_image import AudioCLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workdir/.cache',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
'--volumes=.cache:/workdir/.cache',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...audioclip_image import AudioCLIPImageEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
|
"""Loads word documents."""
import os
import tempfile
from abc import ABC
from pathlib import Path
from typing import Any, List, Union
from urllib.parse import urlparse
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class Docx2txtLoader(BaseLoader, ABC):
"""Load `DOCX` file using `docx2txt` and chunks at character level.
    Defaults to checking for a local file, but if the file is a web path, it will download it
    to a temporary file, use that, and then clean up the temporary file after completion.
"""
def __init__(self, file_path: Union[str, Path]):
"""Initialize with file path."""
self.file_path = str(file_path)
self.original_file_path = self.file_path
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
def load(self) -> List[Document]:
"""Load given path as single page."""
import docx2txt
return [
Document(
page_content=docx2txt.process(self.file_path),
metadata={"source": self.original_file_path},
)
]
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Load `Microsoft Word` file using `Unstructured`.
Works with both .docx and .doc files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(
"example.docx", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-docx
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the Word file to load.
            mode: The mode to use when loading the file. Can be one of "single"
                or "elements". Default is "single".
            **unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.file_utils.filetype import FileType, detect_filetype
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_doc = extension == ".doc"
if is_doc:
validate_unstructured_version("0.4.11")
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
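# A minimal usage sketch for ``Docx2txtLoader``, not part of the original module
# (assumptions: ``example.docx`` is an illustrative local file path; an https URL
# would instead be downloaded to a temporary file first, as described in the class
# docstring).
if __name__ == "__main__":
    loader = Docx2txtLoader("example.docx")
    docs = loader.load()
    print(docs[0].metadata["source"], len(docs[0].page_content))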
|
"""Loads word documents."""
import os
import tempfile
from abc import ABC
from pathlib import Path
from typing import Any, List, Union
from urllib.parse import urlparse
import requests
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class Docx2txtLoader(BaseLoader, ABC):
"""Load `DOCX` file using `docx2txt` and chunks at character level.
    Defaults to checking for a local file, but if the file is a web path, it will download it
    to a temporary file, use that, and then clean up the temporary file after completion.
"""
def __init__(self, file_path: Union[str, Path]):
"""Initialize with file path."""
self.file_path = str(file_path)
self.original_file_path = self.file_path
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
def load(self) -> List[Document]:
"""Load given path as single page."""
import docx2txt
return [
Document(
page_content=docx2txt.process(self.file_path),
metadata={"source": self.original_file_path},
)
]
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Load `Microsoft Word` file using `Unstructured`.
Works with both .docx and .doc files.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(
"example.docx", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-docx
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the Word file to load.
            mode: The mode to use when loading the file. Can be one of "single"
                or "elements". Default is "single".
            **unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.file_utils.filetype import FileType, detect_filetype
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC # type: ignore[arg-type]
except ImportError:
_, extension = os.path.splitext(str(self.file_path))
is_doc = extension == ".doc"
if is_doc:
validate_unstructured_version("0.4.11")
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ReLU")
class ReLU(Layer):
"""Rectified Linear Unit activation function layer.
Formula:
``` python
f(x) = max(x,0)
f(x) = max_value if x >= max_value
f(x) = x if threshold <= x < max_value
f(x) = negative_slope * (x - threshold) otherwise
```
Example:
``` python
relu_layer = keras.layers.activations.ReLU(
max_value=10,
negative_slope=0.5,
threshold=0,
)
input = np.array([-10, -5, 0.0, 5, 10])
result = relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
max_value: Float >= 0. Maximum activation value. None means unlimited.
Defaults to `None`.
negative_slope: Float >= 0. Negative slope coefficient.
Defaults to `0.0`.
threshold: Float >= 0. Threshold value for thresholded activation.
Defaults to `0.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(
self, max_value=None, negative_slope=0.0, threshold=0.0, **kwargs
):
super().__init__(**kwargs)
if max_value is not None and max_value < 0.0:
raise ValueError(
"max_value of a ReLU layer cannot be a negative "
f"value. Received: max_value={max_value}"
)
if negative_slope is None or negative_slope < 0.0:
raise ValueError(
"negative_slope of a ReLU layer cannot be a negative "
f"value. Received: negative_slope={negative_slope}"
)
if threshold is None or threshold < 0.0:
raise ValueError(
"threshold of a ReLU layer cannot be a negative "
f"value. Received: threshold={threshold}"
)
self.max_value = max_value
self.negative_slope = negative_slope
self.threshold = threshold
self.supports_masking = True
self.built = True
def call(self, inputs):
return activations.relu(
inputs,
negative_slope=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold,
)
def get_config(self):
config = super().get_config()
config.update(
{
"max_value": self.max_value,
"negative_slope": self.negative_slope,
"threshold": self.threshold,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
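# A minimal runnable sketch of the thresholded behaviour described in the docstring,
# not part of the original module (assumption: a working Keras 3 backend such as
# NumPy, TensorFlow, JAX, or PyTorch is installed; the input values are illustrative).
if __name__ == "__main__":
    import numpy as np
    relu_layer = ReLU(max_value=10, negative_slope=0.5, threshold=0)
    out = relu_layer(np.array([-10.0, -5.0, 0.0, 5.0, 10.0]))
    print(out)  # expected roughly: [-5.  -2.5  0.   5.  10.]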
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ReLU")
class ReLU(Layer):
"""Rectified Linear Unit activation function layer.
Formula:
``` python
f(x) = max(x,0)
f(x) = max_value if x >= max_value
f(x) = x if threshold <= x < max_value
f(x) = negative_slope * (x - threshold) otherwise
```
Example:
``` python
relu_layer = keras.layers.activations.ReLU(
max_value=10,
negative_slope=0.5,
threshold=0,
)
input = np.array([-10, -5, 0.0, 5, 10])
result = relu_layer(input)
# result = [-5. , -2.5, 0. , 5. , 10.]
```
Args:
max_value: Float >= 0. Maximum activation value. None means unlimited.
Defaults to `None`.
negative_slope: Float >= 0. Negative slope coefficient.
Defaults to `0.0`.
threshold: Float >= 0. Threshold value for thresholded activation.
Defaults to `0.0`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(
self, max_value=None, negative_slope=0.0, threshold=0.0, **kwargs
):
super().__init__(**kwargs)
if max_value is not None and max_value < 0.0:
raise ValueError(
"max_value of a ReLU layer cannot be a negative "
f"value. Received: max_value={max_value}"
)
if negative_slope is None or negative_slope < 0.0:
raise ValueError(
"negative_slope of a ReLU layer cannot be a negative "
f"value. Received: negative_slope={negative_slope}"
)
if threshold is None or threshold < 0.0:
raise ValueError(
"threshold of a ReLU layer cannot be a negative "
f"value. Received: threshold={threshold}"
)
self.supports_masking = True
self.max_value = max_value
self.negative_slope = negative_slope
self.threshold = threshold
def call(self, inputs):
return activations.relu(
inputs,
negative_slope=self.negative_slope,
max_value=self.max_value,
threshold=self.threshold,
)
def get_config(self):
config = super().get_config()
config.update(
{
"max_value": self.max_value,
"negative_slope": self.negative_slope,
"threshold": self.threshold,
}
)
return config
def compute_output_shape(self, input_shape):
return input_shape
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"OperationType": "langchain_community.tools.ainetwork.base",
"AINBaseTool": "langchain_community.tools.ainetwork.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AINBaseTool",
"OperationType",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"OperationType": "langchain_community.tools.ainetwork.base",
"AINBaseTool": "langchain_community.tools.ainetwork.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"OperationType",
"AINBaseTool",
]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN, which handles empty tensors in the head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN, which handles empty tensors in the head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# the model is trained from scratch, so init_cfg is None
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2), # leads to 0.1+ mAP
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def gpu_options(self):
options = ort.SessionOptions()
options.enable_mem_pattern = False
return options
def test_inference_default_pndm(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"botp/stable-diffusion-v1-5-inpainting",
revision="onnx",
safety_checker=None,
feature_extractor=None,
provider=self.gpu_provider,
sess_options=self.gpu_options,
)
pipe.set_progress_bar_config(disable=None)
prompt = "A red cat sitting on a park bench"
generator = np.random.RandomState(0)
output = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
guidance_scale=7.5,
num_inference_steps=10,
generator=generator,
output_type="np",
)
images = output.images
image_slice = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_inference_k_lms(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
lms_scheduler = LMSDiscreteScheduler.from_pretrained(
"botp/stable-diffusion-v1-5-inpainting", subfolder="scheduler", revision="onnx"
)
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"botp/stable-diffusion-v1-5-inpainting",
revision="onnx",
scheduler=lms_scheduler,
safety_checker=None,
feature_extractor=None,
provider=self.gpu_provider,
sess_options=self.gpu_options,
)
pipe.set_progress_bar_config(disable=None)
prompt = "A red cat sitting on a park bench"
generator = np.random.RandomState(0)
output = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
guidance_scale=7.5,
num_inference_steps=20,
generator=generator,
output_type="np",
)
images = output.images
image_slice = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def gpu_options(self):
options = ort.SessionOptions()
options.enable_mem_pattern = False
return options
def test_inference_default_pndm(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"botp/stable-diffusion-v1-5-inpainting",
revision="onnx",
safety_checker=None,
feature_extractor=None,
provider=self.gpu_provider,
sess_options=self.gpu_options,
)
pipe.set_progress_bar_config(disable=None)
prompt = "A red cat sitting on a park bench"
generator = np.random.RandomState(0)
output = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
guidance_scale=7.5,
num_inference_steps=10,
generator=generator,
output_type="np",
)
images = output.images
image_slice = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_inference_k_lms(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
lms_scheduler = LMSDiscreteScheduler.from_pretrained(
"botp/stable-diffusion-v1-5-inpainting", subfolder="scheduler", revision="onnx"
)
pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"botp/stable-diffusion-v1-5-inpainting",
revision="onnx",
scheduler=lms_scheduler,
safety_checker=None,
feature_extractor=None,
provider=self.gpu_provider,
sess_options=self.gpu_options,
)
pipe.set_progress_bar_config(disable=None)
prompt = "A red cat sitting on a park bench"
generator = np.random.RandomState(0)
output = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
guidance_scale=7.5,
num_inference_steps=20,
generator=generator,
output_type="np",
)
images = output.images
image_slice = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from huggingface_hub.constants import HF_HOME
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json"
SAFETENSORS_FILE_EXTENSION = "safetensors"
GGUF_FILE_EXTENSION = "gguf"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current versions of `peft` and `transformers` are compatible with
# the PEFT backend. The library will automatically use the PEFT backend if the correct versions of
# the libraries are available.
# For PEFT it has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) >= version.parse(MIN_TRANSFORMERS_VERSION)
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if USE_PEFT_BACKEND and _CHECK_PEFT:
dep_version_check("peft")
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from huggingface_hub.constants import HF_HOME
from packaging import version
from ..dependency_versions_check import dep_version_check
from .import_utils import ENV_VARS_TRUE_VALUES, is_peft_available, is_transformers_available
MIN_PEFT_VERSION = "0.6.0"
MIN_TRANSFORMERS_VERSION = "4.34.0"
_CHECK_PEFT = os.environ.get("_CHECK_PEFT", "1") in ENV_VARS_TRUE_VALUES
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.bin.index.json"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "diffusion_pytorch_model.safetensors.index.json"
SAFETENSORS_FILE_EXTENSION = "safetensors"
GGUF_FILE_EXTENSION = "gguf"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(HF_HOME, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Below should be `True` if the current versions of `peft` and `transformers` are compatible with
# the PEFT backend. The library will automatically use the PEFT backend if the correct versions of
# the libraries are available.
# For PEFT it has to be greater than or equal to 0.6.0 and for transformers it has to be greater than or equal to 4.34.0.
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) >= version.parse(MIN_PEFT_VERSION)
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) >= version.parse(MIN_TRANSFORMERS_VERSION)
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if USE_PEFT_BACKEND and _CHECK_PEFT:
dep_version_check("peft")
|
from langchain_huggingface.chat_models.huggingface import ( # type: ignore[import-not-found]
TGI_MESSAGE,
TGI_RESPONSE,
ChatHuggingFace,
_convert_dict_to_message,
)
__all__ = ["ChatHuggingFace", "_convert_dict_to_message", "TGI_MESSAGE", "TGI_RESPONSE"]
|
from langchain_huggingface.chat_models.huggingface import ( # type: ignore[import-not-found]
TGI_MESSAGE,
TGI_RESPONSE,
ChatHuggingFace,
_convert_message_to_chat_message,
_convert_TGI_message_to_LC_message,
)
__all__ = [
"ChatHuggingFace",
"_convert_message_to_chat_message",
"_convert_TGI_message_to_LC_message",
"TGI_MESSAGE",
"TGI_RESPONSE",
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
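A small usage sketch for the autogenerated module above, assuming Keras 3 is installed; `keras.ops.nn` re-exports the ops imported from `keras.src.ops.nn`, so they can be called directly on array-like inputs:

import numpy as np
from keras.ops import nn  # public namespace generated by the file above

logits = np.array([[1.0, 2.0, 3.0]])
probs = nn.softmax(logits)                # rows sum to 1
labels = nn.one_hot(np.array([0, 2]), 3)  # shape (2, 3) one-hot matrix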
_base_ = './fast-rcnn_r50_fpn_1x_coco.py'
train_cfg = dict(max_epochs=24)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fast-rcnn_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return the schema for the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
return None
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return the tool metadata for the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
schema = self.get_fn_schema_from_fn_name(fn_name, spec_functions=spec_functions)
if schema is None:
return None
func = getattr(self, fn_name)
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
|
"""Base tool spec class."""
import asyncio
from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import ToolMetadata
AsyncCallable = Callable[..., Awaitable[Any]]
# TODO: deprecate the Tuple (there's no use for it)
SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]]
class BaseToolSpec:
"""Base tool spec class."""
# list of functions that you'd want to convert to spec
spec_functions: List[SPEC_FUNCTION_TYPE]
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return the schema for the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
return None
def get_metadata_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[ToolMetadata]:
"""
NOTE: This function is deprecated and kept only for backwards compatibility.
Return the tool metadata for the given function name.
Return type is Optional, meaning that the schema can be None.
In this case, it's up to the downstream tool implementation to infer the schema.
"""
schema = self.get_fn_schema_from_fn_name(fn_name, spec_functions=spec_functions)
if schema is None:
return None
func = getattr(self, fn_name)
name = fn_name
docstring = func.__doc__ or ""
description = f"{name}{signature(func)}\n{docstring}"
fn_schema = self.get_fn_schema_from_fn_name(
fn_name, spec_functions=spec_functions
)
return ToolMetadata(name=name, description=description, fn_schema=fn_schema)
def to_tool_list(
self,
spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None,
func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None,
) -> List[FunctionTool]:
"""Convert tool spec to list of tools."""
spec_functions = spec_functions or self.spec_functions
func_to_metadata_mapping = func_to_metadata_mapping or {}
tool_list = []
for func_spec in spec_functions:
func_sync = None
func_async = None
if isinstance(func_spec, str):
func = getattr(self, func_spec)
if asyncio.iscoroutinefunction(func):
func_async = func
else:
func_sync = func
metadata = func_to_metadata_mapping.get(func_spec, None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec)
elif isinstance(func_spec, tuple) and len(func_spec) == 2:
func_sync = getattr(self, func_spec[0])
func_async = getattr(self, func_spec[1])
metadata = func_to_metadata_mapping.get(func_spec[0], None)
if metadata is None:
metadata = func_to_metadata_mapping.get(func_spec[1], None)
if metadata is None:
metadata = self.get_metadata_from_fn_name(func_spec[0])
else:
raise ValueError(
"spec_functions must be of type: List[Union[str, Tuple[str, str]]]"
)
if func_sync is None:
if func_async is not None:
func_sync = patch_sync(func_async)
else:
raise ValueError(
f"Could not retrieve a function for spec: {func_spec}"
)
tool = FunctionTool.from_defaults(
fn=func_sync,
async_fn=func_async,
tool_metadata=metadata,
)
tool_list.append(tool)
return tool_list
def patch_sync(func_async: AsyncCallable) -> Callable:
"""Patch sync function from async function."""
def patched_sync(*args: Any, **kwargs: Any) -> Any:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(func_async(*args, **kwargs))
return patched_sync
|
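A hypothetical usage sketch for the spec above, assuming `llama-index-core` is installed; the `MathToolSpec` class and its methods are illustrative only, and `to_tool_list()` wraps each listed function as a `FunctionTool`:

from llama_index.core.tools.tool_spec.base import BaseToolSpec


class MathToolSpec(BaseToolSpec):
    """Illustrative spec exposing two plain methods as tools."""

    spec_functions = ["add", "multiply"]

    def add(self, a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    def multiply(self, a: int, b: int) -> int:
        """Multiply two integers."""
        return a * b


tools = MathToolSpec().to_tool_list()
# Each tool's description embeds the function signature and docstring.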
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
import inspect
import threading
from typing import Awaitable, Callable, ParamSpec, TypeVar, cast, overload
P = ParamSpec("P")
R = TypeVar("R")
@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
def thread_cached(
func: Callable[P, R] | Callable[P, Awaitable[R]],
) -> Callable[P, R] | Callable[P, Awaitable[R]]:
thread_local = threading.local()
def _clear():
if hasattr(thread_local, "cache"):
del thread_local.cache
if inspect.iscoroutinefunction(func):
async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = await cast(Callable[P, Awaitable[R]], func)(
*args, **kwargs
)
return cache[key]
setattr(async_wrapper, "clear_cache", _clear)
return async_wrapper
else:
def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
setattr(sync_wrapper, "clear_cache", _clear)
return sync_wrapper
def clear_thread_cache(func: Callable) -> None:
if clear := getattr(func, "clear_cache", None):
clear()
|
import inspect
import threading
from typing import Any, Awaitable, Callable, ParamSpec, TypeVar, cast, overload
P = ParamSpec("P")
R = TypeVar("R")
@overload
def thread_cached(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]: ...
@overload
def thread_cached(func: Callable[P, R]) -> Callable[P, R]: ...
def thread_cached(
func: Callable[P, R] | Callable[P, Awaitable[R]],
) -> Callable[P, R] | Callable[P, Awaitable[R]]:
thread_local = threading.local()
if inspect.iscoroutinefunction(func):
async def async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = await cast(Callable[P, Awaitable[R]], func)(
*args, **kwargs
)
return cache[key]
return async_wrapper
else:
def sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
cache = getattr(thread_local, "cache", None)
if cache is None:
cache = thread_local.cache = {}
# Build the cache key from the positional and keyword arguments (the cache itself is per function, via the closure)
key = (args, tuple(sorted(kwargs.items())))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return sync_wrapper
def clear_thread_cache(func: Callable[..., Any]) -> None:
"""Clear the cache for a thread-cached function."""
thread_local = threading.local()
cache = getattr(thread_local, "cache", None)
if cache is not None:
# Clear all entries that match the function
for key in list(cache.keys()):
if key and len(key) > 0 and key[0] == func:
del cache[key]
|
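A brief usage sketch for the decorator above, assuming it is importable via the path used by the blocks below; the cache lives in thread-local storage, so each thread memoizes its own results:

import threading

from autogpt_libs.utils.cache import thread_cached  # import path as used elsewhere in this repo


@thread_cached
def square(x: int) -> int:
    print(f"computing {x}")  # printed once per thread for a given argument
    return x * x


square(3)
square(3)  # served from the current thread's cache, nothing is printed
threading.Thread(target=square, args=(3,)).start()  # a new thread recomputes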
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
user_id=graph_exec.user_id,
graph_id=graph_exec.graph_id,
graph_exec_id=graph_exec.graph_exec_id,
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
import logging
from typing import Any
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
from backend.util import json
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
@classmethod
def get_input_schema(cls, data: BlockInput) -> dict[str, Any]:
return data.get("input_schema", {})
@classmethod
def get_input_defaults(cls, data: BlockInput) -> BlockInput:
return data.get("data", {})
@classmethod
def get_missing_input(cls, data: BlockInput) -> set[str]:
required_fields = cls.get_input_schema(data).get("required", [])
return set(required_fields) - set(data)
@classmethod
def get_mismatch_error(cls, data: BlockInput) -> str | None:
return json.validate_with_jsonschema(cls.get_input_schema(data), data)
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
# Copyright 2025 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ...configuration_utils import ConfigMixin, register_to_config
from ...utils.torch_utils import randn_tensor
from ..scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
"""
`ScoreSdeVpScheduler` is a variance preserving stochastic differential equation (SDE) scheduler.
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
methods the library implements for all schedulers such as loading and saving.
Args:
num_train_timesteps (`int`, defaults to 2000):
The number of diffusion steps to train the model.
beta_min (`float`, defaults to 0.1):
The initial (minimum) value of the noise schedule.
beta_max (`float`, defaults to 20):
The final (maximum) value of the noise schedule.
sampling_eps (`float`, defaults to 1e-3):
The end value of sampling, where timesteps decrease progressively from 1 to epsilon.
"""
order = 1
@register_to_config
def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
self.sigmas = None
self.discrete_sigmas = None
self.timesteps = None
def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
"""
Sets the continuous timesteps used for the diffusion chain (to be run before inference).
Args:
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
"""
self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
def step_pred(self, score, x, t, generator=None):
"""
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
score (`torch.Tensor`):
The output of the learned diffusion model (the predicted score for the current sample).
x (`torch.Tensor`):
The current instance of the sample created by the diffusion process.
t (`torch.Tensor`):
The current timestep in the diffusion chain.
generator (`torch.Generator`, *optional*):
A random number generator.
"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
)
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
std = std.flatten()
while len(std.shape) < len(score.shape):
std = std.unsqueeze(-1)
score = -score / std
# compute
dt = -1.0 / len(self.timesteps)
beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
beta_t = beta_t.flatten()
while len(beta_t.shape) < len(x.shape):
beta_t = beta_t.unsqueeze(-1)
drift = -0.5 * beta_t * x
diffusion = torch.sqrt(beta_t)
drift = drift - diffusion**2 * score
x_mean = x + drift * dt
# add noise
noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
x = x_mean + diffusion * math.sqrt(-dt) * noise
return x, x_mean
def __len__(self):
return self.config.num_train_timesteps
|
# Copyright 2024 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ...configuration_utils import ConfigMixin, register_to_config
from ...utils.torch_utils import randn_tensor
from ..scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
"""
`ScoreSdeVpScheduler` is a variance preserving stochastic differential equation (SDE) scheduler.
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
methods the library implements for all schedulers such as loading and saving.
Args:
num_train_timesteps (`int`, defaults to 2000):
The number of diffusion steps to train the model.
beta_min (`float`, defaults to 0.1):
The initial (minimum) value of the noise schedule.
beta_max (`float`, defaults to 20):
The final (maximum) value of the noise schedule.
sampling_eps (`float`, defaults to 1e-3):
The end value of sampling, where timesteps decrease progressively from 1 to epsilon.
"""
order = 1
@register_to_config
def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
self.sigmas = None
self.discrete_sigmas = None
self.timesteps = None
def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
"""
Sets the continuous timesteps used for the diffusion chain (to be run before inference).
Args:
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
"""
self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
def step_pred(self, score, x, t, generator=None):
"""
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
score (`torch.Tensor`):
The output of the learned diffusion model (the predicted score for the current sample).
x (`torch.Tensor`):
The current instance of the sample created by the diffusion process.
t (`torch.Tensor`):
The current timestep in the diffusion chain.
generator (`torch.Generator`, *optional*):
A random number generator.
"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
)
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
std = std.flatten()
while len(std.shape) < len(score.shape):
std = std.unsqueeze(-1)
score = -score / std
# compute
dt = -1.0 / len(self.timesteps)
beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
beta_t = beta_t.flatten()
while len(beta_t.shape) < len(x.shape):
beta_t = beta_t.unsqueeze(-1)
drift = -0.5 * beta_t * x
diffusion = torch.sqrt(beta_t)
drift = drift - diffusion**2 * score
x_mean = x + drift * dt
# add noise
noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
x = x_mean + diffusion * math.sqrt(-dt) * noise
return x, x_mean
def __len__(self):
return self.config.num_train_timesteps
|
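A rough usage sketch for the scheduler above; the import path and tensor shapes are assumptions, and the random tensor stands in for a real score model's output. `set_timesteps` must be called before `step_pred`:

import torch

from diffusers.schedulers import ScoreSdeVpScheduler  # import path is an assumption

scheduler = ScoreSdeVpScheduler()
scheduler.set_timesteps(num_inference_steps=10)

x = torch.randn(1, 3, 8, 8)                  # running sample
for t in scheduler.timesteps:
    score = torch.randn_like(x)              # stand-in for the model's predicted score
    x, x_mean = scheduler.step_pred(score, x, t)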
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,
bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',
'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',
'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',
'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',
'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss'
]
|
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,
bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',
'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',
'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',
'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',
'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss'
]
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import RerankingEvaluator
from sentence_transformers.util import cos_sim
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseRerankingEvaluator(RerankingEvaluator):
def __init__(
self,
samples: list[dict[str, str | list[str]]],
at_k: int = 10,
name: str = "",
write_csv: bool = True,
similarity_fct: Callable[[Tensor, Tensor], Tensor] = cos_sim,
batch_size: int = 64,
show_progress_bar: bool = False,
use_batched_encoding: bool = True,
truncate_dim: int | None = None,
mrr_at_k: int | None = None,
):
return super().__init__(
samples=samples,
at_k=at_k,
name=name,
write_csv=write_csv,
similarity_fct=similarity_fct,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
use_batched_encoding=use_batched_encoding,
truncate_dim=truncate_dim,
mrr_at_k=mrr_at_k,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def compute_metrices(self, model: SparseEncoder):
return super().compute_metrices(model)
def compute_metrices_batched(self, model: SparseEncoder):
return super().compute_metrices_batched(model)
def compute_metrices_individual(self, model: SparseEncoder):
return super().compute_metrices_individual(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
show_progress_bar: bool | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=show_progress_bar,
convert_to_sparse_tensor=True,
convert_to_tensor=False,  # slicing is not supported on sparse tensors, so keep a list of sparse tensors instead
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import RerankingEvaluator
from sentence_transformers.util import cos_sim
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseRerankingEvaluator(RerankingEvaluator):
def __init__(
self,
samples: list[dict[str, str | list[str]]],
at_k: int = 10,
name: str = "",
write_csv: bool = True,
similarity_fct: Callable[[Tensor, Tensor], Tensor] = cos_sim,
batch_size: int = 64,
show_progress_bar: bool = False,
use_batched_encoding: bool = True,
truncate_dim: int | None = None,
mrr_at_k: int | None = None,
):
return super().__init__(
samples=samples,
at_k=at_k,
name=name,
write_csv=write_csv,
similarity_fct=similarity_fct,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
use_batched_encoding=use_batched_encoding,
truncate_dim=truncate_dim,
mrr_at_k=mrr_at_k,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def compute_metrices(self, model: SparseEncoder):
return super().compute_metrices(model)
def compute_metrices_batched(self, model: SparseEncoder):
return super().compute_metrices_batched(model)
def compute_metrices_individual(self, model: SparseEncoder):
return super().compute_metrices_individual(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
show_progress_bar: bool | None = None,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=show_progress_bar,
convert_to_sparse_tensor=True,
convert_to_tensor=False,  # slicing is not supported on sparse tensors, so keep a list of sparse tensors instead
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
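An illustrative sketch of the input format the evaluator above expects; the strings are placeholders, and constructing a real `SparseEncoder` model is left out since it requires downloading weights:

samples = [
    {
        "query": "how do sparse encoders work?",
        "positive": ["Sparse encoders map text to high-dimensional sparse vectors."],
        "negative": ["The Eiffel Tower is located in Paris."],
    },
]
# evaluator = SparseRerankingEvaluator(samples=samples, at_k=10, name="dev")
# results = evaluator(model)  # `model` would be a SparseEncoder instance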
"""DeepMemory Retrieval Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.schema import TextNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
class DeepMemoryRetrieverPack(BaseLlamaPack):
"""DeepMemory retriever pack."""
def __init__(
self,
dataset_path: str = "llama_index",
token: Optional[str] = None,
read_only: Optional[bool] = False,
overwrite: bool = False,
verbose: bool = True,
nodes: Optional[List[TextNode]] = None,
top_k: int = 4,
**kwargs: Any,
):
self._vector_store = DeepLakeVectorStore(
dataset_path=dataset_path,
token=token,
read_only=read_only,
overwrite=overwrite,
verbose=verbose,
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = self._index.as_retriever(
similarity_top_k=top_k, vector_store_kwargs={"deep_memory": True}
)
self.query_engine = RetrieverQueryEngine.from_args(retriever=self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
"""DeepMemory Retrieval Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.schema import TextNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
class DeepMemoryRetrieverPack(BaseLlamaPack):
"""DeepMemory retriever pack."""
def __init__(
self,
dataset_path: str = "llama_index",
token: Optional[str] = None,
read_only: Optional[bool] = False,
overwrite: bool = False,
verbose: bool = True,
nodes: Optional[List[TextNode]] = None,
top_k: int = 4,
**kwargs: Any,
):
self._vector_store = DeepLakeVectorStore(
dataset_path=dataset_path,
token=token,
read_only=read_only,
overwrite=overwrite,
verbose=verbose,
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._vector_store
)
self._index = VectorStoreIndex(
nodes, storage_context=self._storage_context, **kwargs
)
else:
self._index = VectorStoreIndex.from_vector_store(
self._vector_store, **kwargs
)
self._storage_context = self._index.storage_context
self.retriever = self._index.as_retriever(
similarity_top_k=top_k, vector_store_kwargs={"deep_memory": True}
)
self.query_engine = RetrieverQueryEngine.from_args(retriever=self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_store": self._vector_store,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
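A hypothetical instantiation of the pack above, assuming the Deep Lake vector-store integration is installed and a dataset is reachable; the dataset path and token are placeholders:

# pack = DeepMemoryRetrieverPack(dataset_path="hub://org/dataset", token="...", top_k=4)
# nodes = pack.retrieve("What is deep memory?")   # raw retrieved nodes
# response = pack.run("What is deep memory?")     # full query-engine response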
from fastapi import FastAPI
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
app = FastAPI(docs_url=None, redoc_url=None)
@app.get("/docs", include_in_schema=False)
async def custom_swagger_ui_html():
return get_swagger_ui_html(
openapi_url=app.openapi_url,
title=app.title + " - Swagger UI",
oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url,
swagger_js_url="https://unpkg.com/swagger-ui-dist@5/swagger-ui-bundle.js",
swagger_css_url="https://unpkg.com/swagger-ui-dist@5/swagger-ui.css",
)
@app.get(app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
async def swagger_ui_redirect():
return get_swagger_ui_oauth2_redirect_html()
@app.get("/redoc", include_in_schema=False)
async def redoc_html():
return get_redoc_html(
openapi_url=app.openapi_url,
title=app.title + " - ReDoc",
redoc_js_url="https://unpkg.com/redoc@2/bundles/redoc.standalone.js",
)
@app.get("/users/{username}")
async def read_user(username: str):
return {"message": f"Hello {username}"}
|
from fastapi import FastAPI
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
app = FastAPI(docs_url=None, redoc_url=None)
@app.get("/docs", include_in_schema=False)
async def custom_swagger_ui_html():
return get_swagger_ui_html(
openapi_url=app.openapi_url,
title=app.title + " - Swagger UI",
oauth2_redirect_url=app.swagger_ui_oauth2_redirect_url,
swagger_js_url="https://unpkg.com/swagger-ui-dist@5/swagger-ui-bundle.js",
swagger_css_url="https://unpkg.com/swagger-ui-dist@5/swagger-ui.css",
)
@app.get(app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
async def swagger_ui_redirect():
return get_swagger_ui_oauth2_redirect_html()
@app.get("/redoc", include_in_schema=False)
async def redoc_html():
return get_redoc_html(
openapi_url=app.openapi_url,
title=app.title + " - ReDoc",
redoc_js_url="https://unpkg.com/redoc@next/bundles/redoc.standalone.js",
)
@app.get("/users/{username}")
async def read_user(username: str):
return {"message": f"Hello {username}"}
|
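A short note on exercising the app above; the module name `main` is an assumption. With `docs_url=None` and `redoc_url=None`, only the custom routes serve the documentation UIs:

# Run (assuming the code is saved as main.py):
#   uvicorn main:app --reload
# Then open:
#   http://127.0.0.1:8000/docs   -> Swagger UI loaded from the pinned CDN bundle
#   http://127.0.0.1:8000/redoc  -> ReDoc loaded from the CDN bundle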
# Copyright (c) OpenMMLab. All rights reserved.
# from mmengine.dist import get_dist_info, all_reduce
from collections import OrderedDict
from typing import Generator, List
from unittest.mock import MagicMock, Mock
import torch
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
from mmengine.registry import HOOKS
from .hook import Hook
# TODO, replace with import mmengine.dist as dist
dist = Mock()
dist.IS_DIST = MagicMock(return_value=True)
# TODO, replace with mmengine.dist.get_dist_info
get_dist_info = MagicMock(return_value=(0, 1))
# TODO, replace with mmengine.dist.all_reduce
all_reduce = MagicMock()
# TODO, may need to move to dist.utils after implementing dist module
def _allreduce_coalesced(tensors: List[torch.Tensor],
world_size: int,
bucket_size_mb: int = -1) -> None:
"""All-reduce a sequence of tensors as a whole.
Args:
tensors (List[torch.Tensor]): A sequence of tensors to be
all-reduced.
world_size (int): The world size of the process group.
bucket_size_mb (int): The limit of each chunk in megabytes
for grouping tensors into chunks. Defaults to -1.
"""
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_params(params: Generator[torch.Tensor, None, None],
coalesce: bool = True,
bucket_size_mb: int = -1) -> None:
"""All-reduce parameters.
Args:
params (Generator[torch.Tensor, None, None]): List of parameters or
buffers of a model.
coalesce (bool, optional): Whether to reduce parameters as a whole.
Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
_, world_size = get_dist_info()
if world_size == 1:
return
params_data = [param.data for param in params]
if coalesce:
_allreduce_coalesced(params_data, world_size, bucket_size_mb)
else:
for tensor in params_data:
all_reduce(tensor.div_(world_size))
@HOOKS.register_module()
class SyncBuffersHook(Hook):
"""Synchronize model buffers such as running_mean and running_var in BN at
the end of each epoch."""
priority = 'NORMAL'
def __init__(self) -> None:
self.distributed = dist.IS_DIST
def after_train_epoch(self, runner) -> None:
"""All-reduce model buffers at the end of each epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self.distributed:
allreduce_params(runner.model.buffers())
|
# Copyright (c) OpenMMLab. All rights reserved.
# from mmengine.dist import get_dist_info, all_reduce
from collections import OrderedDict
from typing import Generator, List
from unittest.mock import MagicMock, Mock
import torch
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
from mmengine.registry import HOOKS
from .hook import Hook
# TODO, replace with import mmengine.dist as dist
dist = Mock()
dist.IS_DIST = MagicMock(return_value=True)
# TODO, replace with mmengine.dist.get_dist_info
get_dist_info = MagicMock(return_value=(0, 1))
# TODO, replace with mmengine.dist.all_reduce
all_reduce = MagicMock()
# TODO, may need to move to dist.utils after implementing dist module
def _allreduce_coalesced(tensors: List[torch.Tensor],
world_size: int,
bucket_size_mb: int = -1) -> None:
"""All-reduce a sequence of tensors as a whole.
Args:
tensors (List[torch.Tensor]): A sequence of tensors to be
all-reduced.
world_size (int): The world size of the process group.
bucket_size_mb (int): The limit of each chunk in megabytes
for grouping tensors into chunks. Defaults to -1.
"""
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_params(params: Generator[torch.Tensor, None, None],
coalesce: bool = True,
bucket_size_mb: int = -1) -> None:
"""All-reduce parameters.
Args:
params (Generator[torch.Tensor, None, None]): List of parameters or
buffers of a model.
coalesce (bool, optional): Whether to reduce parameters as a whole.
Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
_, world_size = get_dist_info()
if world_size == 1:
return
params_data = [param.data for param in params]
if coalesce:
_allreduce_coalesced(params_data, world_size, bucket_size_mb)
else:
for tensor in params_data:
all_reduce(tensor.div_(world_size))
@HOOKS.register_module()
class SyncBuffersHook(Hook):
"""Synchronize model buffers such as running_mean and running_var in BN at
the end of each epoch."""
priority = 'NORMAL'
def __init__(self) -> None:
self.distributed = dist.IS_DIST
def after_epoch(self, runner) -> None:
"""All-reduce model buffers at the end of each epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self.distributed:
allreduce_params(runner.model.buffers())
|
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = 'ssd300_coco.py'
# model settings
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.1, 0.9),
strides=[8, 16, 32, 64, 128, 256, 512],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]])))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
import subprocess
import pytest
from flair_text import FlairTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=FlairTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"cuda"',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from flair_text import FlairTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=FlairTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import OptMultiConfig
@MODELS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channels (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
num_deconv_kernels (tuple[int]): Number of kernels per stage.
use_dcn (bool): If True, use DCNv2. Defaults to True.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization
config dict.
"""
def __init__(self,
in_channels: int,
num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...],
use_dcn: bool = True,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channels = in_channels
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(
self, num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...]) -> nn.Sequential:
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channels = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channels,
feat_channels,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channels,
feat_channels,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channels = feat_channels
return nn.Sequential(*layers)
def init_weights(self) -> None:
"""Initialize the parameters."""
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
def forward(self, x: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]:
"""model forward."""
assert isinstance(x, (list, tuple))
outs = self.deconv_layers(x[-1])
return outs,
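# A minimal usage sketch (hypothetical values, not part of the original module):
# neck = CTResNetNeck(in_channels=512,
#                     num_deconv_filters=(256, 128, 64),
#                     num_deconv_kernels=(4, 4, 4),
#                     use_dcn=False)
# neck.init_weights()
# feats = neck([torch.randn(1, 512, 16, 16)])  # 1-tuple; the last feature map is upsampled 2x per stage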
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.core.utils import OptMultiConfig
from mmdet.registry import MODELS
@MODELS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channels (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
num_deconv_kernels (tuple[int]): Number of kernels per stage.
use_dcn (bool): If True, use DCNv2. Defaults to True.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`], optional): Initialization
config dict.
"""
def __init__(self,
in_channels: int,
num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...],
use_dcn: bool = True,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channels = in_channels
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(
self, num_deconv_filters: Tuple[int, ...],
num_deconv_kernels: Tuple[int, ...]) -> nn.Sequential:
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channels = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channels,
feat_channels,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channels,
feat_channels,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channels = feat_channels
return nn.Sequential(*layers)
def init_weights(self) -> None:
"""Initialize the parameters."""
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
def forward(self, x: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]:
"""model forward."""
assert isinstance(x, (list, tuple))
outs = self.deconv_layers(x[-1])
return outs,
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode="mean",
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-mean-pooling-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-mean-pooling-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
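# A hypothetical follow-up sketch (assumes the model saved above in `final_output_dir`):
# loaded = SentenceTransformer(final_output_dir)
# embeddings = loaded.encode(["A sentence to embed", "Another sentence"])
# print(embeddings.shape)  # (2, 300) with 300d GloVe vectors and same-sized Dense layers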
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
|
from docarray.documents.text import Text
def test_text_document_operators():
doc = Text(text='text', url='url.com')
assert doc == 'text'
assert doc != 'url.com'
doc2 = Text(id=doc.id, text='text', url='url.com')
assert doc == doc2
doc3 = Text(id='other-id', text='text', url='url.com')
assert doc != doc3
assert 't' in doc
assert 'a' not in doc
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
text = Text()
assert text is not None
assert text.text is None
|
from docarray.documents.text import Text
def test_text_document_operators():
doc = Text(text='text', url='url.com')
assert doc == 'text'
assert doc != 'url.com'
doc2 = Text(id=doc.id, text='text', url='url.com')
assert doc == doc2
doc3 = Text(id='other-id', text='text', url='url.com')
assert doc != doc3
assert 't' in doc
assert 'a' not in doc
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
|
_base_ = './retinanet_r50-caffe_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './retinanet_r50-caffe_fpn_ms-3x_coco.py'
# model settings
model = dict(
pretrained='open-mmlab://detectron2/resnet101_caffe',
backbone=dict(depth=101))
|
from llama_index_instrumentation.span import active_span_id
from llama_index_instrumentation.span.base import BaseSpan
from llama_index_instrumentation.span.simple import SimpleSpan
__all__ = ["BaseSpan", "SimpleSpan", "active_span_id"]
|
from contextvars import ContextVar
from typing import Optional
from llama_index.core.instrumentation.span.base import BaseSpan
from llama_index.core.instrumentation.span.simple import SimpleSpan
# ContextVar for managing active spans
active_span_id: ContextVar[Optional[str]] = ContextVar("active_span_id", default=None)
active_span_id.set(None)
__all__ = ["BaseSpan", "SimpleSpan", "active_span_id"]
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of how to use a different file client backend
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the path prefix
# (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: use `backend_args` (named `file_client_args` in versions before 3.0.0rc6)
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this step from the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric=['bbox', 'segm'],
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_instance/test')
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this step from the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric=['bbox', 'segm'],
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_instance/test')
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput, SchedulerMixin, UNet2DModel
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
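# A hypothetical usage sketch (assumes a compatible `unet` and `scheduler` are available):
# pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
# output, message = pipe(batch_size=1, num_inference_steps=25)
# output.images[0].save("sample.png")  # `message` is the extra string returned above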
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
r"""
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Parameters:
unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
[`DDPMScheduler`], or [`DDIMScheduler`].
"""
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[torch.Generator] = None,
num_inference_steps: int = 50,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
eta (`float`, *optional*, defaults to 0.0):
The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
generated images.
"""
# Sample gaussian noise to begin loop
image = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
generator=generator,
)
image = image.to(self.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
model_output = self.unet(image, t).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
image = self.scheduler.step(model_output, t, image).prev_sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=image), "This is a local test"
|
__version__ = '0.30.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
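# A minimal sketch: to surface docarray's log output, raise the logger level, e.g.
# logger.setLevel(logging.DEBUG)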
|
__version__ = '0.30.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .ddq_detr_aux_loss import DDQAuxLoss
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, SIoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'SIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p',
'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss',
'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss',
'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss', 'EQLV2Loss',
'MarginL2Loss', 'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss',
'DDQAuxLoss'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, SIoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'SIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p',
'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss',
'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss',
'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss', 'EQLV2Loss',
'MarginL2Loss', 'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss'
]
|
from collections.abc import Sequence
from inspect import signature
from typing import Optional, Union
from langchain_core.callbacks import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of Transformers."""
transformers: list[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents, query, callbacks=callbacks
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents, query, callbacks=callbacks
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
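# A hypothetical composition sketch (component names are illustrative, not real classes):
# pipeline = DocumentCompressorPipeline(
#     transformers=[text_splitter, redundancy_filter, relevance_compressor]
# )
# compressed = pipeline.compress_documents(docs, query="what does the report conclude?")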
|
from collections.abc import Sequence
from inspect import signature
from typing import Optional, Union
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of Transformers."""
transformers: list[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents, query, callbacks=callbacks
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents, query, callbacks=callbacks
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type
from langchain_core.tools import BaseTool
from langchain_core.utils import guard_import
from pydantic import model_validator
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
else:
try:
# We do this so pydantic can resolve the types when instantiating
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
except ImportError:
pass
def lazy_import_playwright_browsers() -> Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
"""
Lazy import playwright browsers.
Returns:
Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
AsyncBrowser and SyncBrowser classes.
"""
return (
guard_import(module_name="playwright.async_api").Browser,
guard_import(module_name="playwright.sync_api").Browser,
)
class BaseBrowserTool(BaseTool):
"""Base class for browser tools."""
sync_browser: Optional["SyncBrowser"] = None
async_browser: Optional["AsyncBrowser"] = None
@model_validator(mode="before")
@classmethod
def validate_browser_provided(cls, values: dict) -> Any:
"""Check that the arguments are valid."""
lazy_import_playwright_browsers()
if values.get("async_browser") is None and values.get("sync_browser") is None:
raise ValueError("Either async_browser or sync_browser must be specified.")
return values
@classmethod
def from_browser(
cls,
sync_browser: Optional[SyncBrowser] = None,
async_browser: Optional[AsyncBrowser] = None,
) -> BaseBrowserTool:
"""Instantiate the tool."""
lazy_import_playwright_browsers()
return cls(sync_browser=sync_browser, async_browser=async_browser) # type: ignore[call-arg]
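# A hypothetical usage sketch (SomeNavigateTool stands in for a concrete subclass):
# from playwright.sync_api import sync_playwright
# playwright = sync_playwright().start()
# browser = playwright.chromium.launch(headless=True)
# tool = SomeNavigateTool.from_browser(sync_browser=browser)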
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type
from langchain_core.tools import BaseTool
from langchain_core.utils import guard_import
from pydantic import model_validator
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
else:
try:
# We do this so pydantic can resolve the types when instantiating
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
except ImportError:
pass
def lazy_import_playwright_browsers() -> Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
"""
Lazy import playwright browsers.
Returns:
Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
AsyncBrowser and SyncBrowser classes.
"""
return (
guard_import(module_name="playwright.async_api").Browser,
guard_import(module_name="playwright.sync_api").Browser,
)
class BaseBrowserTool(BaseTool): # type: ignore[override]
"""Base class for browser tools."""
sync_browser: Optional["SyncBrowser"] = None
async_browser: Optional["AsyncBrowser"] = None
@model_validator(mode="before")
@classmethod
def validate_browser_provided(cls, values: dict) -> Any:
"""Check that the arguments are valid."""
lazy_import_playwright_browsers()
if values.get("async_browser") is None and values.get("sync_browser") is None:
raise ValueError("Either async_browser or sync_browser must be specified.")
return values
@classmethod
def from_browser(
cls,
sync_browser: Optional[SyncBrowser] = None,
async_browser: Optional[AsyncBrowser] = None,
) -> BaseBrowserTool:
"""Instantiate the tool."""
lazy_import_playwright_browsers()
return cls(sync_browser=sync_browser, async_browser=async_browser) # type: ignore[call-arg]
|
import os
from source_separation.utils.dataset import wsj0mix
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_FILENAMES = [
"012c0207_1.9952_01cc0202_-1.9952.wav",
"01co0302_1.63_014c020q_-1.63.wav",
"01do0316_0.24011_205a0104_-0.24011.wav",
"01lc020x_1.1301_027o030r_-1.1301.wav",
"01mc0202_0.34056_205o0106_-0.34056.wav",
"01nc020t_0.53821_018o030w_-0.53821.wav",
"01po030f_2.2136_40ko031a_-2.2136.wav",
"01ra010o_2.4098_403a010f_-2.4098.wav",
"01xo030b_0.22377_016o031a_-0.22377.wav",
"02ac020x_0.68566_01ec020b_-0.68566.wav",
"20co010m_0.82801_019c0212_-0.82801.wav",
"20da010u_1.2483_017c0211_-1.2483.wav",
"20oo010d_1.0631_01ic020s_-1.0631.wav",
"20sc0107_2.0222_20fo010h_-2.0222.wav",
"20tc010f_0.051456_404a0110_-0.051456.wav",
"407c0214_1.1712_02ca0113_-1.1712.wav",
"40ao030w_2.4697_20vc010a_-2.4697.wav",
"40pa0101_1.1087_40ea0107_-1.1087.wav",
]
def _mock_dataset(root_dir, num_speaker):
dirnames = ["mix"] + [f"s{i+1}" for i in range(num_speaker)]
for dirname in dirnames:
os.makedirs(os.path.join(root_dir, dirname), exist_ok=True)
seed = 0
sample_rate = 8000
expected = []
for filename in _FILENAMES:
mix = None
src = []
for dirname in dirnames:
waveform = get_whitenoise(sample_rate=8000, duration=1, n_channels=1, dtype="int16", seed=seed)
seed += 1
path = os.path.join(root_dir, dirname, filename)
save_wav(path, waveform, sample_rate)
waveform = normalize_wav(waveform)
if dirname == "mix":
mix = waveform
else:
src.append(waveform)
expected.append((sample_rate, mix, src))
return expected
class TestWSJ0Mix2(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
expected = None
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.expected = _mock_dataset(cls.root_dir, 2)
def test_wsj0mix(self):
dataset = wsj0mix.WSJ0Mix(self.root_dir, num_speakers=2, sample_rate=8000)
n_ite = 0
for i, sample in enumerate(dataset):
(_, sample_mix, sample_src) = sample
(_, expected_mix, expected_src) = self.expected[i]
self.assertEqual(sample_mix, expected_mix, atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[0], expected_src[0], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[1], expected_src[1], atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.expected)
class TestWSJ0Mix3(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
expected = None
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.expected = _mock_dataset(cls.root_dir, 3)
def test_wsj0mix(self):
dataset = wsj0mix.WSJ0Mix(self.root_dir, num_speakers=3, sample_rate=8000)
n_ite = 0
for i, sample in enumerate(dataset):
(_, sample_mix, sample_src) = sample
(_, expected_mix, expected_src) = self.expected[i]
self.assertEqual(sample_mix, expected_mix, atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[0], expected_src[0], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[1], expected_src[1], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[2], expected_src[2], atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.expected)
|
import os
from source_separation.utils.dataset import wsj0mix
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_FILENAMES = [
"012c0207_1.9952_01cc0202_-1.9952.wav",
"01co0302_1.63_014c020q_-1.63.wav",
"01do0316_0.24011_205a0104_-0.24011.wav",
"01lc020x_1.1301_027o030r_-1.1301.wav",
"01mc0202_0.34056_205o0106_-0.34056.wav",
"01nc020t_0.53821_018o030w_-0.53821.wav",
"01po030f_2.2136_40ko031a_-2.2136.wav",
"01ra010o_2.4098_403a010f_-2.4098.wav",
"01xo030b_0.22377_016o031a_-0.22377.wav",
"02ac020x_0.68566_01ec020b_-0.68566.wav",
"20co010m_0.82801_019c0212_-0.82801.wav",
"20da010u_1.2483_017c0211_-1.2483.wav",
"20oo010d_1.0631_01ic020s_-1.0631.wav",
"20sc0107_2.0222_20fo010h_-2.0222.wav",
"20tc010f_0.051456_404a0110_-0.051456.wav",
"407c0214_1.1712_02ca0113_-1.1712.wav",
"40ao030w_2.4697_20vc010a_-2.4697.wav",
"40pa0101_1.1087_40ea0107_-1.1087.wav",
]
def _mock_dataset(root_dir, num_speaker):
dirnames = ["mix"] + [f"s{i+1}" for i in range(num_speaker)]
for dirname in dirnames:
os.makedirs(os.path.join(root_dir, dirname), exist_ok=True)
seed = 0
sample_rate = 8000
expected = []
for filename in _FILENAMES:
mix = None
src = []
for dirname in dirnames:
waveform = get_whitenoise(sample_rate=8000, duration=1, n_channels=1, dtype="int16", seed=seed)
seed += 1
path = os.path.join(root_dir, dirname, filename)
save_wav(path, waveform, sample_rate)
waveform = normalize_wav(waveform)
if dirname == "mix":
mix = waveform
else:
src.append(waveform)
expected.append((sample_rate, mix, src))
return expected
class TestWSJ0Mix2(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
expected = None
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.expected = _mock_dataset(cls.root_dir, 2)
def test_wsj0mix(self):
dataset = wsj0mix.WSJ0Mix(self.root_dir, num_speakers=2, sample_rate=8000)
n_ite = 0
for i, sample in enumerate(dataset):
(_, sample_mix, sample_src) = sample
(_, expected_mix, expected_src) = self.expected[i]
self.assertEqual(sample_mix, expected_mix, atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[0], expected_src[0], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[1], expected_src[1], atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.expected)
class TestWSJ0Mix3(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
expected = None
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.expected = _mock_dataset(cls.root_dir, 3)
def test_wsj0mix(self):
dataset = wsj0mix.WSJ0Mix(self.root_dir, num_speakers=3, sample_rate=8000)
n_ite = 0
for i, sample in enumerate(dataset):
(_, sample_mix, sample_src) = sample
(_, expected_mix, expected_src) = self.expected[i]
self.assertEqual(sample_mix, expected_mix, atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[0], expected_src[0], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[1], expected_src[1], atol=5e-5, rtol=1e-8)
self.assertEqual(sample_src[2], expected_src[2], atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.expected)
|
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case
@torch._dynamo.config.patch("capture_scalar_outputs", True)
class ViewTests(torch._dynamo.test_case.TestCase):
def test_view_to_2d(self):
@torch.compile(fullgraph=True, backend="eager")
def f(t, _u0):
u0 = t[0].item()
u1 = t[1].item()
torch._check_is_size(u0)
torch._check_is_size(u1)
n = u0 * u1
a = torch.randn(n)
return a.view(-1, _u0)
t = torch.tensor([2, 4], dtype=torch.int32)
f(t, 2)
def test_view_to_1d(self):
@torch.compile(fullgraph=True, backend="eager")
def f(t, _n):
u0 = t[0].item()
u1 = t[1].item()
torch._check_is_size(u0)
torch._check_is_size(u1)
a = torch.randn(u0, u1)
return a.view(_n)
t = torch.tensor([2, 4], dtype=torch.int32)
f(t, 8)
def test_view_with_tensor_shape_params(self):
# Test for issue #156720: aten.view.default with tensor shape parameters
class TestModel(torch.nn.Module):
def forward(self, x, shape_params):
return torch.ops.aten.view.default(x, shape_params)
x = torch.randn(24)
shape_params = [
torch.tensor(2, dtype=torch.int32),
torch.tensor(3, dtype=torch.int32),
torch.tensor(4, dtype=torch.int32),
]
model = TestModel()
expected = model(x, shape_params)
compiled_model = torch.compile(model, backend="eager")
result = compiled_model(x, shape_params)
torch.testing.assert_close(result, expected)
def test_tensor_view_with_tensor_shape_params(self):
        # Test tensor.view() method with tensor shape parameters (tuple version)
class TestModel(torch.nn.Module):
def forward(self, x, shape_params):
return x.view(shape_params)
x = torch.randn(24)
shape_params = (
torch.tensor(2, dtype=torch.int32),
torch.tensor(3, dtype=torch.int32),
torch.tensor(4, dtype=torch.int32),
)
model = TestModel()
expected = model(x, shape_params)
compiled_model = torch.compile(model, backend="eager")
result = compiled_model(x, shape_params)
torch.testing.assert_close(result, expected)
def test_tensor_view_with_tensor_args(self):
# Test tensor.view() method with individual tensor arguments
class TestModel(torch.nn.Module):
def forward(self, x, dim1, dim2, dim3):
return x.view(dim1, dim2, dim3)
x = torch.randn(24)
dim1 = torch.tensor(2, dtype=torch.int32)
dim2 = torch.tensor(3, dtype=torch.int32)
dim3 = torch.tensor(4, dtype=torch.int32)
model = TestModel()
expected = model(x, dim1, dim2, dim3)
compiled_model = torch.compile(model, backend="eager")
result = compiled_model(x, dim1, dim2, dim3)
torch.testing.assert_close(result, expected)
def test_torch_reshape_with_tensor_shape_params(self):
# Test torch.reshape() function with tensor shape parameters
def test_fn(x, shape_params):
return torch.reshape(x, shape_params)
x = torch.randn(24)
shape_params = [
torch.tensor(2, dtype=torch.int32),
torch.tensor(3, dtype=torch.int32),
torch.tensor(4, dtype=torch.int32),
]
expected = test_fn(x, shape_params)
compiled_fn = torch.compile(test_fn, backend="eager")
result = compiled_fn(x, shape_params)
torch.testing.assert_close(result, expected)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case
@torch._dynamo.config.patch("capture_scalar_outputs", True)
class ViewTests(torch._dynamo.test_case.TestCase):
def test_view_to_2d(self):
@torch.compile(fullgraph=True, backend="eager")
def f(t, _u0):
u0 = t[0].item()
u1 = t[1].item()
torch._check_is_size(u0)
torch._check_is_size(u1)
n = u0 * u1
a = torch.randn(n)
return a.view(-1, _u0)
t = torch.tensor([2, 4], dtype=torch.int32)
f(t, 2)
def test_view_to_1d(self):
@torch.compile(fullgraph=True, backend="eager")
def f(t, _n):
u0 = t[0].item()
u1 = t[1].item()
torch._check_is_size(u0)
torch._check_is_size(u1)
a = torch.randn(u0, u1)
return a.view(_n)
t = torch.tensor([2, 4], dtype=torch.int32)
f(t, 8)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
from lightning import ConformerRNNTModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DDPPlugin
from transforms import get_data_module
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
gradient_clip_val=10.0,
)
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
model = ConformerRNNTModule(sp_model)
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
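# Example invocation (illustrative paths; the script name is assumed to be train.py):
# python train.py --librispeech-path /datasets/librispeech \
#     --sp-model-path ./spm_unigram.model --global-stats-path ./global_stats.json \
#     --nodes 1 --gpus 8 --epochs 120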
|
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
from lightning import ConformerRNNTModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DDPPlugin
from transforms import get_data_module
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
)
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
model = ConformerRNNTModule(sp_model)
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
|
from typing import Any, Optional
from langchain_core.runnables.base import RunnableBindingBase
from langchain_core.runnables.utils import Input, Output
class HubRunnable(RunnableBindingBase[Input, Output]):
"""
An instance of a runnable stored in the LangChain Hub.
"""
owner_repo_commit: str
def __init__(
self,
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
from langchain.hub import pull
pulled = pull(owner_repo_commit, api_url=api_url, api_key=api_key)
super_kwargs = {
"kwargs": {},
"config": {},
**kwargs,
"bound": pulled,
"owner_repo_commit": owner_repo_commit,
}
super().__init__(**super_kwargs)
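# A hypothetical usage sketch (the repo handle is illustrative):
# runnable = HubRunnable("owner/some-prompt")
# result = runnable.invoke({"question": "..."})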
|
from typing import Any, Optional
from langchain_core.runnables.base import Input, Output, RunnableBindingBase
class HubRunnable(RunnableBindingBase[Input, Output]):
"""
An instance of a runnable stored in the LangChain Hub.
"""
owner_repo_commit: str
def __init__(
self,
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
from langchain.hub import pull
pulled = pull(owner_repo_commit, api_url=api_url, api_key=api_key)
super_kwargs = {
"kwargs": {},
"config": {},
**kwargs,
"bound": pulled,
"owner_repo_commit": owner_repo_commit,
}
super().__init__(**super_kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .param_scheduler_hook import ParamSchedulerHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .sampler_seed_hook import DistSamplerSeedHook
__all__ = ['Hook', 'IterTimerHook', 'DistSamplerSeedHook']
|
_base_ = './fcos_hrnetv2p-w32-gn-head_ms-640-800-4xb4-2x_coco.py'
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
|
_base_ = './fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py'
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
|
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: must comply with TorchScript syntax -- requires annotations and no f-strings or globals
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: must comply with TorchScript syntax -- requires annotations and no f-strings or globals
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
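# Usage sketch (the file path is illustrative; requires the torchaudio ffmpeg extension to be available):
# metadata = info_audio("speech.flac", format=None)
# waveform, sample_rate = load_audio("speech.flac", frame_offset=0, num_frames=16000)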
|
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: must comply with TorchScript syntax -- requires annotations and no f-strings or globals
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
return AudioMetaData(
int(sinfo[8]),
sinfo[5],
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: must comply with TorchScript syntax -- requires annotations and no f-strings or globals
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
from pathlib import Path
import pytest
from torchaudio.datasets import dr_vctk
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
_SUBSETS = ["train", "test"]
_CONDITIONS = ["clean", "device-recorded"]
_SOURCES = ["DR-VCTK_Office1_ClosedWindow", "DR-VCTK_Office1_OpenedWindow"]
_SPEAKER_IDS = range(226, 230)
_CHANNEL_IDS = range(1, 6)
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = {}
dataset_dir = Path(root_dir) / "DR-VCTK" / "DR-VCTK"
dataset_dir.mkdir(parents=True, exist_ok=True)
config_dir = dataset_dir / "configurations"
config_dir.mkdir(parents=True, exist_ok=True)
sample_rate = 16000
seed = 0
for subset in _SUBSETS:
mocked_samples[subset] = []
for condition in _CONDITIONS:
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_dir.mkdir(parents=True, exist_ok=True)
config_filepath = config_dir / f"{subset}_ch_log.txt"
with open(config_filepath, "w") as f:
if subset == "train":
f.write("\n")
f.write("File Name\tMain Source\tChannel Idx\n")
for speaker_id in _SPEAKER_IDS:
utterance_id = 1
for source in _SOURCES:
for channel_id in _CHANNEL_IDS:
filename = f"p{speaker_id}_{utterance_id:03d}.wav"
f.write(f"{filename}\t{source}\t{channel_id}\n")
data = {}
for condition in _CONDITIONS:
data[condition] = get_whitenoise(
sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed
)
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_file_path = audio_dir / filename
save_wav(audio_file_path, data[condition], sample_rate)
seed += 1
sample = (
data[_CONDITIONS[0]],
sample_rate,
data[_CONDITIONS[1]],
sample_rate,
"p" + str(speaker_id),
f"{utterance_id:03d}",
source,
channel_id,
)
mocked_samples[subset].append(sample)
utterance_id += 1
return mocked_samples
class TestDRVCTK(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = {}
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_dr_vctk(self, dataset, subset):
num_samples = 0
for i, (
waveform_clean,
sample_rate_clean,
waveform_dr,
sample_rate_dr,
speaker_id,
utterance_id,
source,
channel_id,
) in enumerate(dataset):
self.assertEqual(waveform_clean, self.samples[subset][i][0], atol=5e-5, rtol=1e-8)
assert sample_rate_clean == self.samples[subset][i][1]
self.assertEqual(waveform_dr, self.samples[subset][i][2], atol=5e-5, rtol=1e-8)
assert sample_rate_dr == self.samples[subset][i][3]
assert speaker_id == self.samples[subset][i][4]
assert utterance_id == self.samples[subset][i][5]
assert source == self.samples[subset][i][6]
assert channel_id == self.samples[subset][i][7]
num_samples += 1
assert num_samples == len(self.samples[subset])
def test_dr_vctk_train_str(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_str(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_train_path(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_path(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_invalid_subset(self):
subset = "invalid"
with pytest.raises(RuntimeError, match=f"The subset '{subset}' does not match any of the supported subsets"):
dr_vctk.DR_VCTK(self.root_dir, subset=subset)
|
from pathlib import Path
import pytest
from torchaudio.datasets import dr_vctk
from torchaudio_unittest.common_utils import (
get_whitenoise,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_SUBSETS = ["train", "test"]
_CONDITIONS = ["clean", "device-recorded"]
_SOURCES = ["DR-VCTK_Office1_ClosedWindow", "DR-VCTK_Office1_OpenedWindow"]
_SPEAKER_IDS = range(226, 230)
_CHANNEL_IDS = range(1, 6)
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = {}
dataset_dir = Path(root_dir) / "DR-VCTK" / "DR-VCTK"
dataset_dir.mkdir(parents=True, exist_ok=True)
config_dir = dataset_dir / "configurations"
config_dir.mkdir(parents=True, exist_ok=True)
sample_rate = 16000
seed = 0
for subset in _SUBSETS:
mocked_samples[subset] = []
for condition in _CONDITIONS:
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_dir.mkdir(parents=True, exist_ok=True)
config_filepath = config_dir / f"{subset}_ch_log.txt"
with open(config_filepath, "w") as f:
if subset == "train":
f.write("\n")
f.write("File Name\tMain Source\tChannel Idx\n")
for speaker_id in _SPEAKER_IDS:
utterance_id = 1
for source in _SOURCES:
for channel_id in _CHANNEL_IDS:
filename = f"p{speaker_id}_{utterance_id:03d}.wav"
f.write(f"{filename}\t{source}\t{channel_id}\n")
data = {}
for condition in _CONDITIONS:
data[condition] = get_whitenoise(
sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed
)
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_file_path = audio_dir / filename
save_wav(audio_file_path, data[condition], sample_rate)
seed += 1
sample = (
data[_CONDITIONS[0]],
sample_rate,
data[_CONDITIONS[1]],
sample_rate,
"p" + str(speaker_id),
f"{utterance_id:03d}",
source,
channel_id,
)
mocked_samples[subset].append(sample)
utterance_id += 1
return mocked_samples
class TestDRVCTK(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = {}
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_dr_vctk(self, dataset, subset):
num_samples = 0
for i, (
waveform_clean,
sample_rate_clean,
waveform_dr,
sample_rate_dr,
speaker_id,
utterance_id,
source,
channel_id,
) in enumerate(dataset):
self.assertEqual(waveform_clean, self.samples[subset][i][0], atol=5e-5, rtol=1e-8)
assert sample_rate_clean == self.samples[subset][i][1]
self.assertEqual(waveform_dr, self.samples[subset][i][2], atol=5e-5, rtol=1e-8)
assert sample_rate_dr == self.samples[subset][i][3]
assert speaker_id == self.samples[subset][i][4]
assert utterance_id == self.samples[subset][i][5]
assert source == self.samples[subset][i][6]
assert channel_id == self.samples[subset][i][7]
num_samples += 1
assert num_samples == len(self.samples[subset])
def test_dr_vctk_train_str(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_str(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_train_path(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_path(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_invalid_subset(self):
subset = "invalid"
with pytest.raises(RuntimeError, match=f"The subset '{subset}' does not match any of the supported subsets"):
dr_vctk.DR_VCTK(self.root_dir, subset=subset)
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Some models don't automatically normalize their embeddings; in that case, normalize them first:
# corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
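# With n_clusters=None, AgglomerativeClustering keeps merging clusters until the linkage
# distance exceeds distance_threshold, so the number of clusters is determined by the data.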
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Normalize the embeddings to unit length
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.host = self.runtime_args.host
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
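# Usage sketch (config values are illustrative; in MMDetection the detector is typically
# built from a config dict through the registry):
# detector = MODELS.build(dict(type='TOOD', backbone=..., neck=..., bbox_head=...))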
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def set_epoch(self, epoch):
self.bbox_head.epoch = epoch
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.25.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
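# For example, parse_version_info('2.25.0') returns (2, 25, 0) and
# parse_version_info('2.25.0rc1') returns (2, 25, 0, 'rc1').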
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.24.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis import Redis
from redis.lock import Lock as RedisLock
class RedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
    It uses an ExpiringDict to automatically discard a mutex that has not been unlocked
    within the timeout, preventing memory leaks.
"""
def __init__(self, redis: "Redis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "RedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = Lock()
@contextmanager
def locked(self, key: Any):
lock = self.acquire(key)
try:
yield
finally:
if lock.locked() and lock.owned():
lock.release()
def acquire(self, key: Any) -> "RedisLock":
"""Acquires and returns a lock with the given key"""
with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
lock.acquire()
return lock
def release(self, key: Any):
if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
lock.release()
def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
self.locks_lock.acquire(blocking=False)
for lock in self.locks.values():
if lock.locked() and lock.owned():
lock.release()
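# Usage sketch (the Redis connection parameters are illustrative):
# from redis import Redis
# mutex = RedisKeyedMutex(Redis(host="localhost", port=6379))
# with mutex.locked("resource-key"):
#     ...  # critical section guarded per key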
|
from contextlib import contextmanager
from threading import Lock
from typing import TYPE_CHECKING, Any
from expiringdict import ExpiringDict
if TYPE_CHECKING:
from redis import Redis
from redis.lock import Lock as RedisLock
class RedisKeyedMutex:
"""
This class provides a mutex that can be locked and unlocked by a specific key,
using Redis as a distributed locking provider.
    It uses an ExpiringDict to automatically discard a mutex that has not been unlocked
    within the timeout, preventing memory leaks.
"""
def __init__(self, redis: "Redis", timeout: int | None = 60):
self.redis = redis
self.timeout = timeout
self.locks: dict[Any, "RedisLock"] = ExpiringDict(
max_len=6000, max_age_seconds=self.timeout
)
self.locks_lock = Lock()
@contextmanager
def locked(self, key: Any):
lock = self.acquire(key)
try:
yield
finally:
if lock.locked():
lock.release()
def acquire(self, key: Any) -> "RedisLock":
"""Acquires and returns a lock with the given key"""
with self.locks_lock:
if key not in self.locks:
self.locks[key] = self.redis.lock(
str(key), self.timeout, thread_local=False
)
lock = self.locks[key]
lock.acquire()
return lock
def release(self, key: Any):
if (lock := self.locks.get(key)) and lock.locked() and lock.owned():
lock.release()
def release_all_locks(self):
"""Call this on process termination to ensure all locks are released"""
self.locks_lock.acquire(blocking=False)
for lock in self.locks.values():
if lock.locked() and lock.owned():
lock.release()
|