input | output
---|---
import os
import time
import pytest
from jina import Client, Document, DocumentArray, Flow
@pytest.mark.parametrize('shards', [1, 2])
@pytest.mark.parametrize('replicas', [1, 3, 4])
def test_containerruntime_args(
docker_image_name, docker_image_built, shards, replicas, port_generator
):
exposed_port = port_generator()
f = Flow(port=exposed_port).add(
name='executor_container',
uses=f'docker://{docker_image_name}',
replicas=replicas,
shards=shards,
polling='ANY',
)
with f:
ret1 = Client(port=exposed_port).index(
inputs=DocumentArray([Document() for _ in range(2000)]),
request_size=10,
return_responses=True,
)
assert len(ret1) == 200
unique_replicas = set()
shard_ids = set()
for r in ret1:
assert len(r.docs) == 10
for replica in r.docs[:, 'tags__replica']:
unique_replicas.add(replica)
for shard_id in r.docs[:, 'tags__shard_id']:
shard_ids.add(shard_id)
for doc in r.docs:
assert doc.tags['shards'] == shards
assert shard_ids == set(range(shards))
assert len(unique_replicas) == replicas * shards
|
import os
import time
import pytest
from jina import Client, Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
img_name = 'jina/replica-exec'
@pytest.fixture(scope='function')
def docker_image_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'replica-exec'), tag=img_name)
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize('shards', [1, 2])
@pytest.mark.parametrize('replicas', [1, 3, 4])
def test_containerruntime_args(docker_image_built, shards, replicas, port_generator):
exposed_port = port_generator()
f = Flow(port=exposed_port).add(
name='executor_container',
uses=f'docker://{img_name}',
replicas=replicas,
shards=shards,
polling='ANY',
)
with f:
ret1 = Client(port=exposed_port).index(
inputs=DocumentArray([Document() for _ in range(2000)]),
request_size=10,
return_responses=True,
)
assert len(ret1) == 200
unique_replicas = set()
shard_ids = set()
for r in ret1:
assert len(r.docs) == 10
for replica in r.docs[:, 'tags__replica']:
unique_replicas.add(replica)
for shard_id in r.docs[:, 'tags__shard_id']:
shard_ids.add(shard_id)
for doc in r.docs:
assert doc.tags['shards'] == shards
assert shard_ids == set(range(shards))
assert len(unique_replicas) == replicas * shards
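Both variants of this test lean on a `port_generator` fixture that is not shown here. A minimal sketch of such a fixture, assuming it only needs to hand out a free local TCP port (this is a hypothetical implementation, not Jina's actual conftest):
import socket

import pytest


@pytest.fixture
def port_generator():
    # Hypothetical helper: ask the OS for an ephemeral port, release it,
    # and let the Flow bind it shortly afterwards.
    def _random_free_port() -> int:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(('', 0))
            return s.getsockname()[1]

    return _random_free_port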
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import Neo4jChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Neo4jChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Neo4jChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import Neo4jChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Neo4jChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Neo4jChatMessageHistory",
]
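Both variants of this shim rely on PEP 562 module-level `__getattr__` to resolve deprecated names lazily. A standalone sketch of the same idea, independent of langchain's `create_importer` helper (illustrative only):
import importlib
import warnings
from typing import Any

_DEPRECATED = {"Neo4jChatMessageHistory": "langchain_community.chat_message_histories"}


def __getattr__(name: str) -> Any:
    # Resolve deprecated names on first access and warn the caller.
    if name in _DEPRECATED:
        target = _DEPRECATED[name]
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {target} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(target), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")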
|
"""Simple reader that turns an iterable of strings into a list of Documents."""
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class StringIterableReader(BasePydanticReader):
"""
String Iterable Reader.
Gets a list of documents, given an iterable (e.g. list) of strings.
Example:
.. code-block:: python
from llama_index import TreeIndex
from llama_index.readers import StringIterableReader
documents = StringIterableReader().load_data(
texts=["I went to the store", "I bought an apple"]
)
index = TreeIndex.from_documents(documents)
query_engine = index.as_query_engine()
query_engine.query("what did I buy?")
# response should be something like "You bought an apple."
"""
is_remote: bool = False
@classmethod
def class_name(cls) -> str:
return "StringIterableReader"
def load_data(self, texts: List[str]) -> List[Document]:
"""Load the data."""
results = []
for text in texts:
results.append(Document(text=text))
return results
|
"""Simple reader that turns an iterable of strings into a list of Documents."""
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class StringIterableReader(BasePydanticReader):
"""String Iterable Reader.
Gets a list of documents, given an iterable (e.g. list) of strings.
Example:
.. code-block:: python
from llama_index import TreeIndex
from llama_index.readers import StringIterableReader
documents = StringIterableReader().load_data(
texts=["I went to the store", "I bought an apple"]
)
index = TreeIndex.from_documents(documents)
query_engine = index.as_query_engine()
query_engine.query("what did I buy?")
# response should be something like "You bought an apple."
"""
is_remote: bool = False
@classmethod
def class_name(cls) -> str:
return "StringIterableReader"
def load_data(self, texts: List[str]) -> List[Document]:
"""Load the data."""
results = []
for text in texts:
results.append(Document(text=text))
return results
|
"""Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain_core.callbacks import CallbackManagerForChainRun
from pydantic import ConfigDict, Field, model_validator
from langchain_community.utilities.requests import TextRequestsWrapper
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501
}
class LLMRequestsChain(Chain):
"""Chain that requests a URL and then uses an LLM to parse results.
**Security Note**: This chain can make GET requests to arbitrary URLs,
including internal URLs.
Control access to who can run this chain and what network access
this chain has.
See https://python.langchain.com/docs/security for more information.
"""
llm_chain: LLMChain
requests_wrapper: TextRequestsWrapper = Field(
default_factory=lambda: TextRequestsWrapper(headers=DEFAULT_HEADERS),
exclude=True,
)
text_length: int = 8000
requests_key: str = "requests_result" #: :meta private:
input_key: str = "url" #: :meta private:
output_key: str = "output" #: :meta private:
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ImportError(
"Could not import bs4 python package. "
"Please install it with `pip install bs4`."
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
from bs4 import BeautifulSoup
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
url = inputs[self.input_key]
res = self.requests_wrapper.get(url)
# extract the text from the html
soup = BeautifulSoup(res, "html.parser")
other_keys[self.requests_key] = soup.get_text()[: self.text_length]
result = self.llm_chain.predict(
callbacks=_run_manager.get_child(), **other_keys
)
return {self.output_key: result}
@property
def _chain_type(self) -> str:
return "llm_requests_chain"
|
"""Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain_core.callbacks import CallbackManagerForChainRun
from pydantic import ConfigDict, Field, model_validator
from langchain_community.utilities.requests import TextRequestsWrapper
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" # noqa: E501
}
class LLMRequestsChain(Chain):
"""Chain that requests a URL and then uses an LLM to parse results.
**Security Note**: This chain can make GET requests to arbitrary URLs,
including internal URLs.
Control access to who can run this chain and what network access
this chain has.
See https://python.langchain.com/docs/security for more information.
"""
llm_chain: LLMChain # type: ignore[valid-type]
requests_wrapper: TextRequestsWrapper = Field(
default_factory=lambda: TextRequestsWrapper(headers=DEFAULT_HEADERS),
exclude=True,
)
text_length: int = 8000
requests_key: str = "requests_result" #: :meta private:
input_key: str = "url" #: :meta private:
output_key: str = "output" #: :meta private:
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ImportError(
"Could not import bs4 python package. "
"Please install it with `pip install bs4`."
)
return values
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
from bs4 import BeautifulSoup
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
# Other keys are assumed to be needed for LLM prediction
other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
url = inputs[self.input_key]
res = self.requests_wrapper.get(url)
# extract the text from the html
soup = BeautifulSoup(res, "html.parser")
other_keys[self.requests_key] = soup.get_text()[: self.text_length]
result = self.llm_chain.predict( # type: ignore[attr-defined]
callbacks=_run_manager.get_child(), **other_keys
)
return {self.output_key: result}
@property
def _chain_type(self) -> str:
return "llm_requests_chain"
|
import gc
import unittest
import numpy as np
import torch
from diffusers import FluxPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
Expectations,
backend_empty_cache,
numpy_cosine_similarity_distance,
require_big_accelerator,
slow,
torch_device,
)
@slow
@require_big_accelerator
class FluxReduxSlowTests(unittest.TestCase):
pipeline_class = FluxPriorReduxPipeline
repo_id = "black-forest-labs/FLUX.1-Redux-dev"
base_pipeline_class = FluxPipeline
base_repo_id = "black-forest-labs/FLUX.1-schnell"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, seed=0):
init_image = load_image(
"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png"
)
return {"image": init_image}
def get_base_pipeline_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
return {
"num_inference_steps": 2,
"guidance_scale": 2.0,
"output_type": "np",
"generator": generator,
}
def test_flux_redux_inference(self):
pipe_redux = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16)
pipe_base = self.base_pipeline_class.from_pretrained(
self.base_repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None
)
pipe_redux.to(torch_device)
pipe_base.enable_model_cpu_offload(device=torch_device)
inputs = self.get_inputs(torch_device)
base_pipeline_inputs = self.get_base_pipeline_inputs(torch_device)
redux_pipeline_output = pipe_redux(**inputs)
image = pipe_base(**base_pipeline_inputs, **redux_pipeline_output).images[0]
image_slice = image[0, :10, :10]
expected_slices = Expectations(
{
("cuda", 7): np.array(
[
0.30078125,
0.37890625,
0.46875,
0.28125,
0.36914062,
0.47851562,
0.28515625,
0.375,
0.4765625,
0.28125,
0.375,
0.48046875,
0.27929688,
0.37695312,
0.47851562,
0.27734375,
0.38085938,
0.4765625,
0.2734375,
0.38085938,
0.47265625,
0.27539062,
0.37890625,
0.47265625,
0.27734375,
0.37695312,
0.47070312,
0.27929688,
0.37890625,
0.47460938,
],
dtype=np.float32,
),
("xpu", 3): np.array(
[
0.20507812,
0.30859375,
0.3984375,
0.18554688,
0.30078125,
0.41015625,
0.19921875,
0.3125,
0.40625,
0.19726562,
0.3125,
0.41601562,
0.19335938,
0.31445312,
0.4140625,
0.1953125,
0.3203125,
0.41796875,
0.19726562,
0.32421875,
0.41992188,
0.19726562,
0.32421875,
0.41992188,
0.20117188,
0.32421875,
0.41796875,
0.203125,
0.32617188,
0.41796875,
],
dtype=np.float32,
),
}
)
expected_slice = expected_slices.get_expectation()
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
assert max_diff < 1e-4
|
import gc
import unittest
import numpy as np
import pytest
import torch
from diffusers import FluxPipeline, FluxPriorReduxPipeline
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
Expectations,
backend_empty_cache,
numpy_cosine_similarity_distance,
require_big_accelerator,
slow,
torch_device,
)
@slow
@require_big_accelerator
@pytest.mark.big_accelerator
class FluxReduxSlowTests(unittest.TestCase):
pipeline_class = FluxPriorReduxPipeline
repo_id = "black-forest-labs/FLUX.1-Redux-dev"
base_pipeline_class = FluxPipeline
base_repo_id = "black-forest-labs/FLUX.1-schnell"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, seed=0):
init_image = load_image(
"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png"
)
return {"image": init_image}
def get_base_pipeline_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
return {
"num_inference_steps": 2,
"guidance_scale": 2.0,
"output_type": "np",
"generator": generator,
}
def test_flux_redux_inference(self):
pipe_redux = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16)
pipe_base = self.base_pipeline_class.from_pretrained(
self.base_repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None
)
pipe_redux.to(torch_device)
pipe_base.enable_model_cpu_offload(device=torch_device)
inputs = self.get_inputs(torch_device)
base_pipeline_inputs = self.get_base_pipeline_inputs(torch_device)
redux_pipeline_output = pipe_redux(**inputs)
image = pipe_base(**base_pipeline_inputs, **redux_pipeline_output).images[0]
image_slice = image[0, :10, :10]
expected_slices = Expectations(
{
("cuda", 7): np.array(
[
0.30078125,
0.37890625,
0.46875,
0.28125,
0.36914062,
0.47851562,
0.28515625,
0.375,
0.4765625,
0.28125,
0.375,
0.48046875,
0.27929688,
0.37695312,
0.47851562,
0.27734375,
0.38085938,
0.4765625,
0.2734375,
0.38085938,
0.47265625,
0.27539062,
0.37890625,
0.47265625,
0.27734375,
0.37695312,
0.47070312,
0.27929688,
0.37890625,
0.47460938,
],
dtype=np.float32,
),
("xpu", 3): np.array(
[
0.20507812,
0.30859375,
0.3984375,
0.18554688,
0.30078125,
0.41015625,
0.19921875,
0.3125,
0.40625,
0.19726562,
0.3125,
0.41601562,
0.19335938,
0.31445312,
0.4140625,
0.1953125,
0.3203125,
0.41796875,
0.19726562,
0.32421875,
0.41992188,
0.19726562,
0.32421875,
0.41992188,
0.20117188,
0.32421875,
0.41796875,
0.203125,
0.32617188,
0.41796875,
],
dtype=np.float32,
),
}
)
expected_slice = expected_slices.get_expectation()
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
assert max_diff < 1e-4
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory,
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = next(iter(outputs.keys()))
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'.",
stacklevel=3,
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory,
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = next(iter(outputs.keys()))
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'.",
stacklevel=2,
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
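A minimal sketch of a concrete memory built on the abstract class above. It only illustrates the pattern (roughly what the legacy buffer memory did); the subclass name is hypothetical:
from langchain_core.messages import get_buffer_string


class SimpleBufferMemory(BaseChatMemory):
    """Expose the whole conversation under a single 'history' variable."""

    memory_key: str = "history"

    @property
    def memory_variables(self) -> list[str]:
        return [self.memory_key]

    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
        messages = self.chat_memory.messages
        if self.return_messages:
            return {self.memory_key: messages}
        return {self.memory_key: get_buffer_string(messages)}


memory = SimpleBufferMemory()
memory.save_context({"input": "hi"}, {"output": "hello!"})
print(memory.load_memory_variables({}))  # {'history': 'Human: hi\nAI: hello!'}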
|
def __getattr__(name: str):
if name in ["ctc_decoder", "lexicon_decoder"]:
import warnings
from torchaudio.models.decoder import ctc_decoder
warnings.warn(
f"{__name__}.{name} has been moved to torchaudio.models.decoder.ctc_decoder",
DeprecationWarning,
)
if name == "lexicon_decoder":
global lexicon_decoder
lexicon_decoder = ctc_decoder
return lexicon_decoder
else:
return ctc_decoder
elif name == "download_pretrained_files":
import warnings
from torchaudio.models.decoder import download_pretrained_files
return download_pretrained_files
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return ["ctc_decoder", "lexicon_decoder", "download_pretrained_files"]
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
"Hypothesis",
"CTCDecoder",
"ctc_decoder",
"lexicon_decoder",
"download_pretrained_files",
]
def _init_extension():
import torchaudio
torchaudio._extension._load_lib("libtorchaudio_decoder")
global _INITIALIZED
_INITIALIZED = True
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not _INITIALIZED:
_init_extension()
try:
from . import _ctc_decoder
except AttributeError as err:
raise RuntimeError(
"CTC decoder requires the decoder extension. Please set BUILD_CTC_DECODER=1 when building from source."
) from err
item = getattr(_ctc_decoder, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.models.layers import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
@MODELS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_convs (int, optional): number of convolutional layers in GlbCtxHead.
Defaults to 4.
in_channels (int, optional): number of input channels. Defaults to 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Defaults to 256.
num_classes (int, optional): number of classes. Defaults to 80.
loss_weight (float, optional): global context loss weight.
Defaults to 1.
conv_cfg (dict, optional): config to init conv layer. Defaults to None.
norm_cfg (dict, optional): config to init norm layer. Defaults to None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection.
Defaults to False.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict. Defaults to
dict(type='Normal', std=0.01, override=dict(name='fc')).
"""
def __init__(
self,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 80,
loss_weight: float = 1.0,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
conv_to_res: bool = False,
init_cfg: MultiConfig = dict(
type='Normal', std=0.01, override=dict(name='fc'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (Tuple[Tensor]): Multi-scale feature maps.
Returns:
Tuple[Tensor]:
- mc_pred (Tensor): Multi-class prediction.
- x (Tensor): Global context feature.
"""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
def loss(self, pred: Tensor, labels: List[Tensor]) -> Tensor:
"""Loss function.
Args:
pred (Tensor): Logits.
labels (list[Tensor]): Ground truths.
Returns:
Tensor: Loss.
"""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils.typing import MultiConfig, OptConfigType
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
@MODELS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_convs (int, optional): number of convolutional layers in GlbCtxHead.
Defaults to 4.
in_channels (int, optional): number of input channels. Defaults to 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Defaults to 256.
num_classes (int, optional): number of classes. Defaults to 80.
loss_weight (float, optional): global context loss weight.
Defaults to 1.
conv_cfg (dict, optional): config to init conv layer. Defaults to None.
norm_cfg (dict, optional): config to init norm layer. Defaults to None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection.
Defaults to False.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict. Defaults to
dict(type='Normal', std=0.01, override=dict(name='fc')).
"""
def __init__(
self,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 80,
loss_weight: float = 1.0,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
conv_to_res: bool = False,
init_cfg: MultiConfig = dict(
type='Normal', std=0.01, override=dict(name='fc'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (Tuple[Tensor]): Multi-scale feature maps.
Returns:
Tuple[Tensor]:
- mc_pred (Tensor): Multi-class prediction.
- x (Tensor): Global context feature.
"""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
def loss(self, pred: Tensor, labels: List[Tensor]) -> Tensor:
"""Loss function.
Args:
pred (Tensor): Logits.
labels (list[Tensor]): Ground truths.
Returns:
Tensor: Loss.
"""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
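A small smoke-test sketch for the head above (assumes mmdet/mmcv are installed; batch size and feature-map sizes are illustrative):
import torch

head = GlobalContextHead(num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80)
feats = tuple(torch.rand(2, 256, s, s) for s in (64, 32, 16, 8))  # FPN-like pyramid
mc_pred, ctx = head(feats)            # forward() only consumes feats[-1]
assert mc_pred.shape == (2, 80)       # one multi-label logit vector per image
assert ctx.shape == (2, 256, 1, 1)    # globally pooled context feature
labels = [torch.tensor([1, 5, 9]), torch.tensor([0])]  # per-image GT class indices
loss = head.loss(mc_pred, labels)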
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640) # height, width
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
# In order to align the source code, the threshold of the val phase is
# 0.01, and the threshold of the test phase is 0.001.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
# According to the official implementation, multi-scale
# training is not handled here but in
# 'mmdet/models/detectors/yolox.py'.
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
# If the image is three-channel, the pad value needs
# to be set separately for each channel.
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
persistent_workers=True,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
max_epochs = 300
num_last_epochs = 15
resume_from = None
interval = 10
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=num_last_epochs,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=max_epochs)
custom_hooks = [
dict(
type='YOLOXModeSwitchHook',
num_last_epochs=num_last_epochs,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=num_last_epochs,
interval=interval,
priority=48),
dict(
type='ExpMomentumEMAHook',
resume_from=resume_from,
momentum=0.0001,
priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(
save_best='auto',
# The evaluation interval is 'interval' when the current epoch is
# less than 'max_epochs - num_last_epochs'.
# The evaluation interval is 1 when the current epoch is greater than
# or equal to 'max_epochs - num_last_epochs'.
interval=interval,
dynamic_intervals=[(max_epochs - num_last_epochs, 1)],
metric='bbox')
log_config = dict(interval=50)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
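A short sketch of how a config like this is typically loaded and tweaked with the MMDetection 2.x tooling (the file path is a placeholder):
from mmcv import Config

cfg = Config.fromfile('configs/yolox/yolox_s_8x8_300e_coco.py')  # placeholder path
cfg.data.samples_per_gpu = 4             # override fields before building the runner
print(cfg.model.bbox_head.num_classes)   # 80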
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640) # height, width
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
# In order to align the source code, the threshold of the val phase is
# 0.01, and the threshold of the test phase is 0.001.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
# According to the official implementation, multi-scale
# training is not handled here but in
# 'mmdet/models/detectors/yolox.py'.
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
# If the image is three-channel, the pad value needs
# to be set separately for each channel.
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
persistent_workers=True,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
max_epochs = 300
num_last_epochs = 15
resume_from = None
interval = 10
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=num_last_epochs,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=max_epochs)
custom_hooks = [
dict(
type='YOLOXModeSwitchHook',
num_last_epochs=num_last_epochs,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=num_last_epochs,
interval=interval,
priority=48),
dict(
type='ExpMomentumEMAHook',
resume_from=resume_from,
momentum=0.0001,
priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(
save_best='auto',
# The evaluation interval is 'interval' when the current epoch is
# less than 'max_epochs - num_last_epochs'.
# The evaluation interval is 1 when the current epoch is greater than
# or equal to 'max_epochs - num_last_epochs'.
interval=interval,
dynamic_intervals=[(max_epochs - num_last_epochs, 1)],
metric='bbox')
log_config = dict(interval=50)
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
if torch.cuda.is_available():
device_properties = torch.cuda.get_device_properties(0)
total_memory = device_properties.total_memory / (1024**3)
print(f"CUDA memory: {total_memory} GB")
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
if torch.cuda.is_available():
device_properties = torch.cuda.get_device_properties(0)
total_memory = device_properties.total_memory / (1024**3)
print(f"CUDA memory: {total_memory} GB")
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
"""Module for argparse for Client"""
def mixin_comm_protocol_parser(parser):
"""Add the arguments for the protocol to the parser
:param parser: the parser to configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolType.from_string,
choices=list(GatewayProtocolType),
default=GatewayProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
:param parser: the parser to configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
|
"""Module for argparse for Client"""
def mixin_comm_protocol_parser(parser):
"""Add the arguments for the protocol to the parser
:param parser: the parser to configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolType.from_string,
choices=list(GatewayProtocolType),
default=GatewayProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
:param parser: the parser to configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
parser.add_argument(
'--return-responses',
action='store_true',
default=False,
help="If set, return results as List of Requests instead of a reduced DocArray.",
)
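A quick sketch of how these mixins compose onto a parser (the parser construction itself is not part of this module):
import argparse

parser = argparse.ArgumentParser(description='Client')
mixin_comm_protocol_parser(parser)
mixin_client_features_parser(parser)

args = parser.parse_args(['--asyncio', '--return-responses'])
print(args.protocol, args.asyncio, args.return_responses)
# e.g. GatewayProtocolType.GRPC True True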
|
from pathlib import Path
from typing import Dict
import numpy as np
from jina import DocumentArray, Document, Executor
from ...paddle_image import ImagePaddlehubEncoder
input_dim = 224
target_output_dim = 2048
num_doc = 2
test_data = np.random.rand(num_doc, 3, input_dim, input_dim)
tmp_files = []
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_name == 'xception71_imagenet'
def test_imagepaddlehubencoder_encode(test_images: Dict[str, np.array]):
encoder = ImagePaddlehubEncoder(channel_axis=3)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (2048,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
|
import os
from typing import Dict
import numpy as np
from jina import DocumentArray, Document
from ...paddle_image import ImagePaddlehubEncoder
directory = os.path.dirname(os.path.realpath(__file__))
input_dim = 224
target_output_dim = 2048
num_doc = 2
test_data = np.random.rand(num_doc, 3, input_dim, input_dim)
tmp_files = []
def test_imagepaddlehubencoder_encode(test_images: Dict[str, np.array]):
encoder = ImagePaddlehubEncoder(channel_axis=3)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (2048,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
When using distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_epoch(self, runner: object) -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (object): The runner of the training process.
"""
if hasattr(runner.data_loader.sampler, 'set_epoch'): # type: ignore
# in case the data loader uses `SequentialSampler` in Pytorch
runner.data_loader.sampler.set_epoch(runner.epoch) # type: ignore
elif hasattr(
runner.data_loader.batch_sampler.sampler, # type: ignore
'set_epoch'):
# the batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.data_loader.batch_sampler.sampler.set_epoch( # type: ignore
runner.epoch) # type: ignore
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
When using distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
def before_epoch(self, runner: object) -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (object): The runner of the training process.
"""
if hasattr(runner.data_loader.sampler, 'set_epoch'): # type: ignore
# in case the data loader uses `SequentialSampler` in Pytorch
runner.data_loader.sampler.set_epoch(runner.epoch) # type: ignore
elif hasattr(
runner.data_loader.batch_sampler.sampler, # type: ignore
'set_epoch'):
# the batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.data_loader.batch_sampler.sampler.set_epoch( # type: ignore
runner.epoch) # type: ignore
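The hook exists because PyTorch's `DistributedSampler` only reshuffles when `set_epoch` is called. A plain-PyTorch sketch of the behaviour the hook automates:
import torch
from torch.utils.data import DataLoader, DistributedSampler, TensorDataset

dataset = TensorDataset(torch.arange(8))
# num_replicas/rank are fixed so the sketch runs without initializing torch.distributed.
sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True)
loader = DataLoader(dataset, sampler=sampler, batch_size=2)

for epoch in range(2):
    sampler.set_epoch(epoch)  # what DistSamplerSeedHook.before_epoch does for you
    order = [int(batch[0][0]) for batch in loader]
    print(epoch, order)       # the shuffling differs between epochs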
|
from functools import partial
from torchaudio.models import emformer_rnnt_base
from torchaudio.pipelines import RNNTBundle
EMFORMER_RNNT_BASE_MUSTC = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_mustc.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_mustc.json",
_sp_model_path="pipeline-assets/spm_bpe_500_mustc.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_MUSTC.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both
streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on *MuST-C release v2.0* :cite:`CATTONI2021101155` dataset
using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__
with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
EMFORMER_RNNT_BASE_TEDLIUM3 = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_tedlium3.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_tedlium3.json",
_sp_model_path="pipeline-assets/spm_bpe_500_tedlium3.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_TEDLIUM3.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both
streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on *TED-LIUM Release 3* :cite:`rousseau2012tedlium` dataset
using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__
with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
|
from functools import partial
from torchaudio.models import emformer_rnnt_base
from torchaudio.pipelines import RNNTBundle
EMFORMER_RNNT_BASE_MUSTC = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_mustc.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_mustc.json",
_sp_model_path="pipeline-assets/spm_bpe_500_mustc.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_MUSTC.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base` and utilizes weights
trained on *MuST-C release v2.0* :cite:`CATTONI2021101155` dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
EMFORMER_RNNT_BASE_TEDLIUM3 = RNNTBundle(
_rnnt_path="models/emformer_rnnt_base_tedlium3.pt",
_rnnt_factory_func=partial(emformer_rnnt_base, num_symbols=501),
_global_stats_path="pipeline-assets/global_stats_rnnt_tedlium3.json",
_sp_model_path="pipeline-assets/spm_bpe_500_tedlium3.model",
_right_padding=4,
_blank=500,
_sample_rate=16000,
_n_fft=400,
_n_mels=80,
_hop_length=160,
_segment_length=16,
_right_context_length=4,
)
EMFORMER_RNNT_BASE_TEDLIUM3.__doc__ = """Pre-trained Emformer-RNNT-based ASR pipeline capable of performing both streaming and non-streaming inference.
The underlying model is constructed by :py:func:`torchaudio.models.emformer_rnnt_base`
and utilizes weights trained on TED-LIUM Release 3 dataset using training script ``train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/asr/emformer_rnnt>`__ with ``num_symbols=501``.
Please refer to :py:class:`torchaudio.pipelines.RNNTBundle` for usage instructions.
"""
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=dict(
_delete_=True,
type='SABLHead',
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
loss_weight=1.0))))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=dict(
_delete_=True,
type='SABLHead',
num_classes=80,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
loss_weight=1.0))))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .ddq_detr_layers import DDQTransformerDecoder
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .mask2former_layers import (Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder)
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'DDQTransformerDecoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_layers import (ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer)
from .dab_detr_layers import (DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder)
from .deformable_detr_layers import (DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer)
from .detr_layers import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer)
from .dino_layers import CdnQueryGenerator, DinoTransformerDecoder
from .mask2former_layers import (Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder)
from .utils import (MLP, AdaptivePadding, ConditionalAttention, DynamicConv,
PatchEmbed, PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'coordinate_to_encoding',
'ConditionalAttention', 'DABDetrTransformerDecoderLayer',
'DABDetrTransformerDecoder', 'DABDetrTransformerEncoder',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
from autogpt_libs.feature_flag.client import (
initialize_launchdarkly,
shutdown_launchdarkly,
)
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
logging.getLogger("autogpt_libs").setLevel(logging.INFO)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
initialize_launchdarkly()
yield
shutdown_launchdarkly()
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
yield
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import PackDetInputs
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, )
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import PackDetInputs
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, )
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
"""A unit test meant to catch accidental introduction of non-optional dependencies."""
from collections.abc import Mapping
from pathlib import Path
from typing import Any
import pytest
import toml
from packaging.requirements import Requirement
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture
def uv_conf() -> dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)
def test_required_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
    # Get the dependencies from the [project] dependencies section
dependencies = uv_conf["project"]["dependencies"]
required_dependencies = {Requirement(dep).name for dep in dependencies}
assert sorted(required_dependencies) == sorted(
[
"PyYAML",
"SQLAlchemy",
"async-timeout",
"langchain-core",
"langchain-text-splitters",
"langsmith",
"pydantic",
"requests",
],
)
def test_test_group_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
dependencies = uv_conf["dependency-groups"]["test"]
test_group_deps = {Requirement(dep).name for dep in dependencies}
assert sorted(test_group_deps) == sorted(
[
"duckdb-engine",
"freezegun",
"langchain-core",
"langchain-tests",
"langchain-text-splitters",
"langchain-openai",
"lark",
"packaging",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"pytest-xdist",
"blockbuster",
"responses",
"syrupy",
"toml",
"requests-mock",
# TODO: temporary hack since cffi 1.17.1 doesn't work with py 3.9.
"cffi",
"numpy",
],
)
|
"""A unit test meant to catch accidental introduction of non-optional dependencies."""
from collections.abc import Mapping
from pathlib import Path
from typing import Any
import pytest
import toml
from packaging.requirements import Requirement
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture()
def uv_conf() -> dict[str, Any]:
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)
def test_required_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
    # Get the dependencies from the [project] dependencies section
dependencies = uv_conf["project"]["dependencies"]
required_dependencies = {Requirement(dep).name for dep in dependencies}
assert sorted(required_dependencies) == sorted(
[
"PyYAML",
"SQLAlchemy",
"async-timeout",
"langchain-core",
"langchain-text-splitters",
"langsmith",
"pydantic",
"requests",
]
)
def test_test_group_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""Check if someone is attempting to add additional test dependencies.
Only dependencies associated with test running infrastructure should be added
to the test group; e.g., pytest, pytest-cov etc.
Examples of dependencies that should NOT be included: boto3, azure, postgres, etc.
"""
dependencies = uv_conf["dependency-groups"]["test"]
test_group_deps = {Requirement(dep).name for dep in dependencies}
assert sorted(test_group_deps) == sorted(
[
"duckdb-engine",
"freezegun",
"langchain-core",
"langchain-tests",
"langchain-text-splitters",
"langchain-openai",
"lark",
"packaging",
"pandas",
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-dotenv",
"pytest-mock",
"pytest-socket",
"pytest-watcher",
"pytest-xdist",
"blockbuster",
"responses",
"syrupy",
"toml",
"requests-mock",
# TODO: temporary hack since cffi 1.17.1 doesn't work with py 3.9.
"cffi",
"numpy",
]
)
|
"""Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(self, id: str, **kwargs: Any) -> None:
"""Create a RemoveMessage.
Args:
id: The ID of the message to remove.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
super().__init__("", id=id, **kwargs)
|
"""Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(self, id: str, **kwargs: Any) -> None:
"""Create a RemoveMessage.
Args:
id: The ID of the message to remove.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
super().__init__("", id=id, **kwargs)
RemoveMessage.model_rebuild()
|
from langchain_core.tools import BaseTool, tool
from langchain_tests.integration_tests import ToolsIntegrationTests
from langchain_tests.unit_tests import ToolsUnitTests
@tool
def parrot_multiply_tool(a: int, b: int) -> int:
"""Multiply two numbers like a parrot. Parrots always add eighty for their matey."""
return a * b + 80
class TestParrotMultiplyToolUnit(ToolsUnitTests):
@property
def tool_constructor(self) -> BaseTool:
return parrot_multiply_tool
@property
def tool_invoke_params_example(self) -> dict:
"""Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> BaseTool:
return parrot_multiply_tool
@property
def tool_invoke_params_example(self) -> dict:
"""Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
|
from langchain_core.tools import BaseTool, tool
from langchain_tests.integration_tests import ToolsIntegrationTests
from langchain_tests.unit_tests import ToolsUnitTests
@tool
def parrot_multiply_tool(a: int, b: int) -> int:
"""Multiply two numbers like a parrot. Parrots always add eighty for their matey."""
return a * b + 80
class TestParrotMultiplyToolUnit(ToolsUnitTests):
@property
def tool_constructor(self) -> BaseTool:
return parrot_multiply_tool
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
class TestParrotMultiplyToolIntegration(ToolsIntegrationTests):
@property
def tool_constructor(self) -> BaseTool:
return parrot_multiply_tool
@property
def tool_invoke_params_example(self) -> dict:
"""
Returns a dictionary representing the "args" of an example tool call.
This should NOT be a ToolCall dict - i.e. it should not
have {"name", "id", "args"} keys.
"""
return {"a": 2, "b": 3}
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
from match_merger import MatchMerger
@pytest.fixture
def docs_matrix():
return [
DocumentArray(
[
Document(
id=f'doc {i}',
matches=[Document(id=f'doc {i}, match {j}') for j in range(3)],
chunks=[
Document(
id=f'doc {i}, chunk {j}',
matches=[
Document(id=f'doc {i}, chunk {j}, match {k}')
for k in range(2)
],
)
for j in range(3)
],
)
for i in range(2)
]
)
for shard in range(4)
]
def test_root_traversal(docs_matrix):
executor = MatchMerger()
document_array = executor.merge(docs_matrix=docs_matrix, parameters={})
assert len(document_array) == 2
for d in document_array:
assert len(d.matches) == 12
def test_chunk_traversal(docs_matrix):
executor = MatchMerger(default_traversal_paths=('c',))
document_array = executor.merge(docs_matrix=docs_matrix, parameters={})
assert len(document_array) == 6
for d in document_array:
assert len(d.matches) == 8
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
from ...match_merger import MatchMerger
@pytest.fixture
def docs_matrix():
return [
DocumentArray(
[
Document(
id=f'doc {i}',
matches=[Document(id=f'doc {i}, match {j}') for j in range(3)],
chunks=[
Document(
id=f'doc {i}, chunk {j}',
matches=[
Document(id=f'doc {i}, chunk {j}, match {k}')
for k in range(2)
],
)
for j in range(3)
],
)
for i in range(2)
]
)
for shard in range(4)
]
def test_root_traversal(docs_matrix):
executor = MatchMerger()
document_array = executor.merge(docs_matrix=docs_matrix, parameters={})
assert len(document_array) == 2
for d in document_array:
assert len(d.matches) == 12
def test_chunk_traversal(docs_matrix):
executor = MatchMerger(default_traversal_paths=('c',))
document_array = executor.merge(docs_matrix=docs_matrix, parameters={})
assert len(document_array) == 6
for d in document_array:
assert len(d.matches) == 8
|
import tempfile
from enum import Enum
from typing import Any, Dict, Optional, Union
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
def _import_elevenlabs() -> Any:
try:
import elevenlabs
except ImportError as e:
raise ImportError(
"Cannot import elevenlabs, please install `pip install elevenlabs`."
) from e
return elevenlabs
class ElevenLabsModel(str, Enum):
"""Models available for Eleven Labs Text2Speech."""
MULTI_LINGUAL = "eleven_multilingual_v2"
MULTI_LINGUAL_FLASH = "eleven_flash_v2_5"
MONO_LINGUAL = "eleven_flash_v2"
class ElevenLabsText2SpeechTool(BaseTool):
"""Tool that queries the Eleven Labs Text2Speech API.
In order to set this up, follow instructions at:
https://elevenlabs.io/docs
"""
model: Union[ElevenLabsModel, str] = ElevenLabsModel.MULTI_LINGUAL
voice: str = "JBFqnCBsd6RMkjVDRZzb"
name: str = "eleven_labs_text2speech"
description: str = (
"A wrapper around Eleven Labs Text2Speech. "
"Useful for when you need to convert text to speech. "
"It supports more than 30 languages, including English, German, Polish, "
"Spanish, Italian, French, Portuguese, and Hindi. "
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key exists in environment."""
_ = get_from_dict_or_env(values, "elevenlabs_api_key", "ELEVENLABS_API_KEY")
return values
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
elevenlabs = _import_elevenlabs()
client = elevenlabs.client.ElevenLabs()
try:
speech = client.text_to_speech.convert(
text=query,
model_id=self.model,
voice_id=self.voice,
output_format="mp3_44100_128",
)
with tempfile.NamedTemporaryFile(
mode="bx", suffix=".mp3", delete=False
) as f:
f.write(speech)
return f.name
except Exception as e:
raise RuntimeError(f"Error while running ElevenLabsText2SpeechTool: {e}")
def play(self, speech_file: str) -> None:
"""Play the text as speech."""
elevenlabs = _import_elevenlabs()
with open(speech_file, mode="rb") as f:
speech = f.read()
elevenlabs.play(speech)
def stream_speech(self, query: str) -> None:
"""Stream the text as speech as it is generated.
        Play the audio through your speakers."""
elevenlabs = _import_elevenlabs()
client = elevenlabs.client.ElevenLabs()
speech_stream = client.text_to_speech.convert_as_stream(
text=query, model_id=self.model, voice_id=self.voice
)
elevenlabs.stream(speech_stream)
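# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes the `elevenlabs` package is installed and ELEVENLABS_API_KEY is set;
# the sample sentence is arbitrary and the returned path is a temp .mp3 file.
if __name__ == "__main__":
    tts = ElevenLabsText2SpeechTool()
    speech_file = tts.run("Hello, this is a quick text-to-speech test.")
    tts.play(speech_file)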
|
import tempfile
from enum import Enum
from typing import Any, Dict, Optional, Union
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
def _import_elevenlabs() -> Any:
try:
import elevenlabs
except ImportError as e:
raise ImportError(
"Cannot import elevenlabs, please install `pip install elevenlabs`."
) from e
return elevenlabs
class ElevenLabsModel(str, Enum):
"""Models available for Eleven Labs Text2Speech."""
MULTI_LINGUAL = "eleven_multilingual_v2"
MULTI_LINGUAL_FLASH = "eleven_flash_v2_5"
MONO_LINGUAL = "eleven_flash_v2"
class ElevenLabsText2SpeechTool(BaseTool): # type: ignore[override]
"""Tool that queries the Eleven Labs Text2Speech API.
In order to set this up, follow instructions at:
https://elevenlabs.io/docs
"""
model: Union[ElevenLabsModel, str] = ElevenLabsModel.MULTI_LINGUAL
voice: str = "JBFqnCBsd6RMkjVDRZzb"
name: str = "eleven_labs_text2speech"
description: str = (
"A wrapper around Eleven Labs Text2Speech. "
"Useful for when you need to convert text to speech. "
"It supports more than 30 languages, including English, German, Polish, "
"Spanish, Italian, French, Portuguese, and Hindi. "
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key exists in environment."""
_ = get_from_dict_or_env(values, "elevenlabs_api_key", "ELEVENLABS_API_KEY")
return values
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
elevenlabs = _import_elevenlabs()
client = elevenlabs.client.ElevenLabs()
try:
speech = client.text_to_speech.convert(
text=query,
model_id=self.model,
voice_id=self.voice,
output_format="mp3_44100_128",
)
with tempfile.NamedTemporaryFile(
mode="bx", suffix=".mp3", delete=False
) as f:
f.write(speech)
return f.name
except Exception as e:
raise RuntimeError(f"Error while running ElevenLabsText2SpeechTool: {e}")
def play(self, speech_file: str) -> None:
"""Play the text as speech."""
elevenlabs = _import_elevenlabs()
with open(speech_file, mode="rb") as f:
speech = f.read()
elevenlabs.play(speech)
def stream_speech(self, query: str) -> None:
"""Stream the text as speech as it is generated.
        Play the audio through your speakers."""
elevenlabs = _import_elevenlabs()
client = elevenlabs.client.ElevenLabs()
speech_stream = client.text_to_speech.convert_as_stream(
text=query, model_id=self.model, voice_id=self.voice
)
elevenlabs.stream(speech_stream)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.16'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Capitan 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.15'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Capitan 256;
    the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
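# --- Usage sketch (added for illustration; not part of the original module) ---
# Modules are typically registered with a decorator and later built from a
# config dict whose 'type' key names the registered class; `ToyDetector` and
# its `num_classes` argument are hypothetical.
if __name__ == '__main__':
    @MODELS.register_module()
    class ToyDetector:
        def __init__(self, num_classes=80):
            self.num_classes = num_classes
    toy = MODELS.build(dict(type='ToyDetector', num_classes=20))
    assert toy.num_classes == 20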
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage writer
WRITERS = Registry('writer')
|
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
Args:
model: SparseEncoder
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`SparseMarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
}
)
def compute_labels(batch):
return {"label": teacher_model.encode(batch["english"], convert_to_sparse_tensor=False)}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.SparseMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model)
|
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
return super().__init__(model)
|
__version__ = '0.36.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.35.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
import os
import time
import pytest
from jina import Flow, Document, Client
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def docker_image():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir), tag='override-config-test')
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
client.close()
@pytest.fixture()
def flow(request, port_generator):
exposed_port = port_generator()
flow_src = request.param
if flow_src == 'yml':
return Flow(port=exposed_port).load_config(os.path.join(cur_dir, 'flow.yml'))
elif flow_src == 'python':
return Flow(port=exposed_port).add(
uses='docker://override-config-test',
uses_with={'param1': 50, 'param2': 30},
uses_metas={'workspace': 'different_workspace'},
)
@pytest.mark.parametrize('flow', ['yml', 'python'], indirect=['flow'])
def test_override_config_params(docker_image, flow):
with flow:
resps = Client(port=flow.port).search(
inputs=[Document()], return_responses=True
)
doc = resps[0].docs[0]
assert doc.tags['param1'] == 50
assert doc.tags['param2'] == 30
        assert doc.tags['param3'] == 10  # not overridden
        assert doc.tags['name'] == 'name'  # not overridden
assert doc.tags['workspace'] == 'different_workspace'
def test_override_config_params_shards(docker_image, port_generator):
exposed_port = port_generator()
flow = Flow(port=exposed_port).add(
uses='docker://override-config-test',
uses_with={'param1': 50, 'param2': 30},
uses_metas={'workspace': 'different_workspace'},
shards=2,
)
with flow:
resps = Client(port=exposed_port).search(
inputs=[Document()], return_responses=True
)
doc = resps[0].docs[0]
assert doc.tags['param1'] == 50
assert doc.tags['param2'] == 30
        assert doc.tags['param3'] == 10  # not overridden
        assert doc.tags['name'] == 'name'  # not overridden
assert doc.tags['workspace'] == 'different_workspace'
|
import os
import time
import pytest
from jina import Flow, Document, Client
cur_dir = os.path.dirname(os.path.abspath(__file__))
exposed_port = 12345
@pytest.fixture()
def docker_image():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir), tag='override-config-test')
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
client.close()
@pytest.fixture()
def flow(request):
flow_src = request.param
if flow_src == 'yml':
return Flow(port=exposed_port).load_config(os.path.join(cur_dir, 'flow.yml'))
elif flow_src == 'python':
return Flow(port=exposed_port).add(
uses='docker://override-config-test',
uses_with={'param1': 50, 'param2': 30},
uses_metas={'workspace': 'different_workspace'},
)
@pytest.mark.parametrize('flow', ['yml', 'python'], indirect=['flow'])
def test_override_config_params(docker_image, flow):
with flow:
resps = Client(port=exposed_port, return_responses=True).search(
inputs=[Document()]
)
doc = resps[0].docs[0]
assert doc.tags['param1'] == 50
assert doc.tags['param2'] == 30
        assert doc.tags['param3'] == 10  # not overridden
        assert doc.tags['name'] == 'name'  # not overridden
assert doc.tags['workspace'] == 'different_workspace'
def test_override_config_params_shards(docker_image):
flow = Flow(port=exposed_port).add(
uses='docker://override-config-test',
uses_with={'param1': 50, 'param2': 30},
uses_metas={'workspace': 'different_workspace'},
shards=2,
)
with flow:
resps = Client(port=exposed_port, return_responses=True).search(
inputs=[Document()]
)
doc = resps[0].docs[0]
assert doc.tags['param1'] == 50
assert doc.tags['param2'] == 30
        assert doc.tags['param3'] == 10  # not overridden
        assert doc.tags['name'] == 'name'  # not overridden
assert doc.tags['workspace'] == 'different_workspace'
|
from llama_index_instrumentation.span_handlers.null import NullSpanHandler # noqa
|
import inspect
from typing import Dict, Optional, Any
from llama_index.core.instrumentation.span_handlers.base import BaseSpanHandler
from llama_index.core.instrumentation.span.base import BaseSpan
class NullSpanHandler(BaseSpanHandler[BaseSpan]):
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "NullSpanHandler"
def span_enter(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Logic for entering a span."""
return
def span_exit(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Logic for exiting a span."""
return
def new_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_span_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Create a span."""
return
def prepare_to_exit_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Logic for exiting a span."""
return
def prepare_to_drop_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
err: Optional[BaseException] = None,
**kwargs: Any,
) -> None:
"""Logic for droppping a span."""
return
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from .. import HnswlibSearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
@pytest.mark.parametrize(['metric', 'is_distance'],
[('l2', True), ('ip', True), ('cosine', True),
('l2', False), ('ip', False), ('cosine', False)])
def test_metric(tmpdir, metric, is_distance):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, metric=metric, is_distance=is_distance)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert docs[0].matches[i].scores[metric].value >= docs[0].matches[i + 1].scores[metric].value
else:
assert docs[0].matches[i].scores[metric].value <= docs[0].matches[i + 1].scores[metric].value
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_none_doc(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas)
indexer.search(None, {})
indexer.fill_embedding(None)
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(default_top_k=TOP_K, metas=metas)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
def test_flow(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
flow = Flow().add(uses=HnswlibSearcher, override_with={'dump_path': DUMP_PATH, 'default_top_k': TOP_K},
override_metas=metas)
with flow:
resp = flow.post(
on='/search',
inputs=DocumentArray([Document(embedding=np.random.random(7))]),
return_results=True
)
assert len(resp[0].data.docs[0].matches) == TOP_K
doc_array = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
with flow:
resp = flow.post(
on='/fill_embedding',
inputs=doc_array,
return_results=True
)
for i, doc in enumerate(resp[0].data.docs):
assert doc.embedding
assert doc.embedding.dense.shape == [7]
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from .. import HnswlibSearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_none_doc(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas)
indexer.search(None, {})
indexer.fill_embedding(None)
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(default_top_k=TOP_K, metas=metas)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
def test_flow(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher', 'pea_id': 0, 'replica_id': 0}
flow = Flow().add(uses=HnswlibSearcher, override_with={'dump_path': DUMP_PATH, 'default_top_k': TOP_K},
override_metas=metas)
with flow:
resp = flow.post(
on='/search',
inputs=DocumentArray([Document(embedding=np.random.random(7))]),
return_results=True
)
assert len(resp[0].data.docs[0].matches) == TOP_K
doc_array = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
with flow:
resp = flow.post(
on='/fill_embedding',
inputs=doc_array,
return_results=True
)
for i, doc in enumerate(resp[0].data.docs):
assert doc.embedding
assert doc.embedding.dense.shape == [7]
|
# ruff: noqa: E402
import pytest
# Rewrite assert statements for the test suite so that implementations can
# see the full error message from failed asserts.
# https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#assertion-rewriting
modules = [
"base_store",
"cache",
"chat_models",
"vectorstores",
"embeddings",
"tools",
"retrievers",
]
for module in modules:
pytest.register_assert_rewrite(f"langchain_tests.integration_tests.{module}")
from .base_store import BaseStoreAsyncTests, BaseStoreSyncTests
from .cache import AsyncCacheTestSuite, SyncCacheTestSuite
from .chat_models import ChatModelIntegrationTests
from .embeddings import EmbeddingsIntegrationTests
from .retrievers import RetrieversIntegrationTests
from .tools import ToolsIntegrationTests
from .vectorstores import VectorStoreIntegrationTests
__all__ = [
"AsyncCacheTestSuite",
"BaseStoreAsyncTests",
"BaseStoreSyncTests",
"ChatModelIntegrationTests",
"EmbeddingsIntegrationTests",
"RetrieversIntegrationTests",
"SyncCacheTestSuite",
"ToolsIntegrationTests",
"VectorStoreIntegrationTests",
]
|
# ruff: noqa: E402
import pytest
# Rewrite assert statements for the test suite so that implementations can
# see the full error message from failed asserts.
# https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#assertion-rewriting
modules = [
"base_store",
"cache",
"chat_models",
"vectorstores",
"embeddings",
"tools",
"retrievers",
]
for module in modules:
pytest.register_assert_rewrite(f"langchain_tests.integration_tests.{module}")
from .base_store import BaseStoreAsyncTests, BaseStoreSyncTests
from .cache import AsyncCacheTestSuite, SyncCacheTestSuite
from .chat_models import ChatModelIntegrationTests
from .embeddings import EmbeddingsIntegrationTests
from .retrievers import RetrieversIntegrationTests
from .tools import ToolsIntegrationTests
from .vectorstores import VectorStoreIntegrationTests
__all__ = [
"ChatModelIntegrationTests",
"EmbeddingsIntegrationTests",
"ToolsIntegrationTests",
"BaseStoreAsyncTests",
"BaseStoreSyncTests",
"AsyncCacheTestSuite",
"SyncCacheTestSuite",
"VectorStoreIntegrationTests",
"RetrieversIntegrationTests",
]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from docarray import BaseDoc
from docarray.typing import TorchTensor
def test_tensor_ops():
class A(BaseDoc):
tensor: TorchTensor[3, 224, 224]
class B(BaseDoc):
tensor: TorchTensor[3, 112, 224]
tensor = A(tensor=torch.ones(3, 224, 224)).tensor
tensord = A(tensor=torch.ones(3, 224, 224)).tensor
tensorn = torch.zeros(3, 224, 224)
tensorhalf = B(tensor=torch.ones(3, 112, 224)).tensor
tensorfull = torch.cat([tensorhalf, tensorhalf], dim=1)
assert type(tensor) == TorchTensor
assert type(tensor + tensord) == TorchTensor
assert type(tensor + tensorn) == TorchTensor
assert type(tensor + tensorfull) == TorchTensor
|
import torch
from docarray import BaseDoc
from docarray.typing import TorchTensor
def test_tensor_ops():
class A(BaseDoc):
tensor: TorchTensor[3, 224, 224]
class B(BaseDoc):
tensor: TorchTensor[3, 112, 224]
tensor = A(tensor=torch.ones(3, 224, 224)).tensor
tensord = A(tensor=torch.ones(3, 224, 224)).tensor
tensorn = torch.zeros(3, 224, 224)
tensorhalf = B(tensor=torch.ones(3, 112, 224)).tensor
tensorfull = torch.cat([tensorhalf, tensorhalf], dim=1)
assert type(tensor) == TorchTensor
assert type(tensor + tensord) == TorchTensor
assert type(tensor + tensorn) == TorchTensor
assert type(tensor + tensorfull) == TorchTensor
|
from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any
from docarray.document.data import DocumentData
from docarray.document.mixins import AllMixins
from docarray.base import BaseDCType
from docarray.math.ndarray import detach_tensor_if_present
if TYPE_CHECKING:
from docarray.typing import ArrayType, StructValueType, DocumentContentType
class Document(AllMixins, BaseDCType):
"""Document is the basic data type in DocArray.
A Document is a container for any kind of data, be it text, image, audio, video, or 3D meshes.
You can initialize a Document object with given attributes:
.. code-block:: python
from docarray import Document
import numpy
d1 = Document(text='hello')
d3 = Document(tensor=numpy.array([1, 2, 3]))
d4 = Document(
uri='https://jina.ai',
mime_type='text/plain',
granularity=1,
adjacency=3,
tags={'foo': 'bar'},
)
Documents support a :ref:`nested structure <recursive-nested-document>`, which can also be specified during construction:
.. code-block:: python
d = Document(
id='d0',
chunks=[Document(id='d1', chunks=Document(id='d2'))],
matches=[Document(id='d3')],
)
A Document can embed its contents using the :meth:`embed` method and a provided embedding model:
.. code-block:: python
import torchvision
q = (
Document(uri='/Users/usr/path/to/image.jpg')
.load_uri_to_image_tensor()
.set_image_tensor_normalization()
.set_image_tensor_channel_axis(-1, 0)
)
model = torchvision.models.resnet50(pretrained=True)
q.embed(model)
Multiple Documents can be organized into a :class:`~docarray.array.document.DocumentArray`.
.. seealso::
For further details, see our :ref:`user guide <document>`.
"""
_data_class = DocumentData
_unresolved_fields_dest = 'tags'
_post_init_fields = (
'text',
'blob',
'tensor',
'content',
'uri',
'mime_type',
'chunks',
'matches',
)
@overload
def __init__(self):
"""Create an empty Document."""
...
@overload
def __init__(self, _obj: Optional['Document'] = None, copy: bool = False):
...
@overload
def __init__(self, _obj: Optional[Any] = None):
"""Create a Document from a `docarray.dataclass` instance"""
...
@overload
def __init__(
self,
_obj: Optional[Dict],
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
):
...
@overload
def __init__(self, blob: Optional[bytes] = None, **kwargs):
"""Create a Document with binary content."""
...
@overload
def __init__(self, tensor: Optional['ArrayType'] = None, **kwargs):
"""Create a Document with NdArray-like content."""
...
@overload
def __init__(self, text: Optional[str] = None, **kwargs):
"""Create a Document with string content."""
...
@overload
def __init__(self, uri: Optional[str] = None, **kwargs):
"""Create a Document with content from a URI."""
...
@overload
def __init__(
self,
parent_id: Optional[str] = None,
granularity: Optional[int] = None,
adjacency: Optional[int] = None,
blob: Optional[bytes] = None,
tensor: Optional['ArrayType'] = None,
mime_type: Optional[str] = None,
text: Optional[str] = None,
content: Optional['DocumentContentType'] = None,
weight: Optional[float] = None,
uri: Optional[str] = None,
tags: Optional[Dict[str, 'StructValueType']] = None,
offset: Optional[float] = None,
location: Optional[List[float]] = None,
embedding: Optional['ArrayType'] = None,
modality: Optional[str] = None,
evaluations: Optional[Dict[str, Dict[str, 'StructValueType']]] = None,
scores: Optional[Dict[str, Dict[str, 'StructValueType']]] = None,
chunks: Optional[Sequence['Document']] = None,
matches: Optional[Sequence['Document']] = None,
):
...
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getstate__(self):
state = self.__dict__.copy()
for attribute in ['embedding', 'tensor']:
if hasattr(self, attribute):
setattr(
state['_data'],
attribute,
detach_tensor_if_present(getattr(state['_data'], attribute)),
)
return state
|
from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any
from .data import DocumentData
from .mixins import AllMixins
from ..base import BaseDCType
from ..math.ndarray import detach_tensor_if_present
if TYPE_CHECKING:
from ..typing import ArrayType, StructValueType, DocumentContentType
class Document(AllMixins, BaseDCType):
"""Document is the basic data type in DocArray.
A Document is a container for any kind of data, be it text, image, audio, video, or 3D meshes.
You can initialize a Document object with given attributes:
.. code-block:: python
from docarray import Document
import numpy
d1 = Document(text='hello')
d3 = Document(tensor=numpy.array([1, 2, 3]))
d4 = Document(
uri='https://jina.ai',
mime_type='text/plain',
granularity=1,
adjacency=3,
tags={'foo': 'bar'},
)
Documents support a :ref:`nested structure <recursive-nested-document>`, which can also be specified during construction:
.. code-block:: python
d = Document(
id='d0',
chunks=[Document(id='d1', chunks=Document(id='d2'))],
matches=[Document(id='d3')],
)
A Document can embed its contents using the :meth:`embed` method and a provided embedding model:
.. code-block:: python
import torchvision
q = (
Document(uri='/Users/usr/path/to/image.jpg')
.load_uri_to_image_tensor()
.set_image_tensor_normalization()
.set_image_tensor_channel_axis(-1, 0)
)
model = torchvision.models.resnet50(pretrained=True)
q.embed(model)
Multiple Documents can be organized into a :class:`~docarray.array.document.DocumentArray`.
.. seealso::
For further details, see our :ref:`user guide <document>`.
"""
_data_class = DocumentData
_unresolved_fields_dest = 'tags'
_post_init_fields = (
'text',
'blob',
'tensor',
'content',
'uri',
'mime_type',
'chunks',
'matches',
)
@overload
def __init__(self):
"""Create an empty Document."""
...
@overload
def __init__(self, _obj: Optional['Document'] = None, copy: bool = False):
...
@overload
def __init__(self, _obj: Optional[Any] = None):
"""Create a Document from a `docarray.dataclass` instance"""
...
@overload
def __init__(
self,
_obj: Optional[Dict],
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
):
...
@overload
def __init__(self, blob: Optional[bytes] = None, **kwargs):
"""Create a Document with binary content."""
...
@overload
def __init__(self, tensor: Optional['ArrayType'] = None, **kwargs):
"""Create a Document with NdArray-like content."""
...
@overload
def __init__(self, text: Optional[str] = None, **kwargs):
"""Create a Document with string content."""
...
@overload
def __init__(self, uri: Optional[str] = None, **kwargs):
"""Create a Document with content from a URI."""
...
@overload
def __init__(
self,
parent_id: Optional[str] = None,
granularity: Optional[int] = None,
adjacency: Optional[int] = None,
blob: Optional[bytes] = None,
tensor: Optional['ArrayType'] = None,
mime_type: Optional[str] = None,
text: Optional[str] = None,
content: Optional['DocumentContentType'] = None,
weight: Optional[float] = None,
uri: Optional[str] = None,
tags: Optional[Dict[str, 'StructValueType']] = None,
offset: Optional[float] = None,
location: Optional[List[float]] = None,
embedding: Optional['ArrayType'] = None,
modality: Optional[str] = None,
evaluations: Optional[Dict[str, Dict[str, 'StructValueType']]] = None,
scores: Optional[Dict[str, Dict[str, 'StructValueType']]] = None,
chunks: Optional[Sequence['Document']] = None,
matches: Optional[Sequence['Document']] = None,
):
...
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getstate__(self):
state = self.__dict__.copy()
for attribute in ['embedding', 'tensor']:
if hasattr(self, attribute):
setattr(
state['_data'],
attribute,
detach_tensor_if_present(getattr(state['_data'], attribute)),
)
return state
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
"""
from __future__ import annotations
from torch.utils.data import Dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset
and then passing it to the DataLoader, you can pass the list of InputExamples directly to the dataset loader.
"""
def __init__(self, examples: list[InputExample], model: SentenceTransformer):
self.examples = examples
def __getitem__(self, item):
return self.examples[item]
def __len__(self):
return len(self.examples)
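# Hedged sketch of the replacement described in the class docstring above: with the
# old `model.fit`-style API, the list of InputExamples can be handed straight to a
# torch DataLoader. The texts and labels below are made-up placeholders.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    train_examples = [
        InputExample(texts=["first sentence", "second sentence"], label=0.8),
        InputExample(texts=["another pair", "of sentences"], label=0.3),
    ]
    train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
    print(len(train_dataloader))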
|
from __future__ import annotations
from torch.utils.data import Dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers.InputExample import InputExample
class SentencesDataset(Dataset):
"""
DEPRECATED: This class is no longer used. Instead of wrapping your List of InputExamples in a SentencesDataset
and then passing it to the DataLoader, you can pass the list of InputExamples directly to the dataset loader.
"""
def __init__(self, examples: list[InputExample], model: SentenceTransformer):
self.examples = examples
def __getitem__(self, item):
return self.examples[item]
def __len__(self):
return len(self.examples)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, MODELS,
OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS,
RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS,
WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, MODELS,
OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS,
RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS,
WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS'
]
|
"""Callback Handler that tracks AIMessage.usage_metadata."""
import threading
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Optional
from langchain_core._api import beta
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.outputs import ChatGeneration, LLMResult
@beta()
class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm = init_chat_model(model="openai:gpt-4o-mini")
callback = UsageMetadataCallbackHandler()
results = llm.batch(["Hello", "Goodbye"], config={"callbacks": [callback]})
print(callback.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: dict[str, UsageMetadata] = {}
def __repr__(self) -> str:
return str(self.usage_metadata)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
usage_metadata = None
model_name = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
model_name = message.response_metadata.get("model_name")
except AttributeError:
pass
# update shared state behind lock
if usage_metadata and model_name:
with self._lock:
if model_name not in self.usage_metadata:
self.usage_metadata[model_name] = usage_metadata
else:
self.usage_metadata[model_name] = add_usage(
self.usage_metadata[model_name], usage_metadata
)
@contextmanager
@beta()
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
Args:
name (str): The name of the context variable. Defaults to
``"usage_metadata_callback"``.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm = init_chat_model(model="openai:gpt-4o-mini")
with get_usage_metadata_callback() as cb:
llm.invoke("Hello")
llm.invoke("Goodbye")
print(cb.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
from langchain_core.tracers.context import register_configure_hook
usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
|
"""Callback Handler that tracks AIMessage.usage_metadata."""
import threading
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Optional
from langchain_core._api import beta
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.outputs import ChatGeneration, LLMResult
@beta()
class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm = init_chat_model(model="openai:gpt-4o-mini")
callback = UsageMetadataCallbackHandler()
results = llm.batch(["Hello", "Goodbye"], config={"callbacks": [callback]})
print(callback.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: Optional[UsageMetadata] = None
def __repr__(self) -> str:
return str(self.usage_metadata)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
else:
usage_metadata = None
except AttributeError:
usage_metadata = None
else:
usage_metadata = None
# update shared state behind lock
with self._lock:
self.usage_metadata = add_usage(self.usage_metadata, usage_metadata)
@contextmanager
@beta()
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
Args:
name (str): The name of the context variable. Defaults to
``"usage_metadata_callback"``.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm = init_chat_model(model="openai:gpt-4o-mini")
with get_usage_metadata_callback() as cb:
llm.invoke("Hello")
llm.invoke("Goodbye")
print(cb.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
from langchain_core.tracers.context import register_configure_hook
usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
|
# Owner(s): ["oncall: distributed"]
import torch
import torch.nn as nn
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor import DTensor
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
raise NotImplementedError
class EPModel(nn.Module):
def __init__(self, rank):
super().__init__()
self.net1 = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
self.net2 = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
def forward(self, x):
raise NotImplementedError
class SecondTier(nn.Module):
def __init__(self, rank):
super().__init__()
self.ep_layers = nn.ModuleList(
[EPModel(rank) if rank % 4 == i else Dummymodel() for i in range(4)]
)
self.net = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
def forward(self, x):
raise NotImplementedError
class TopModel(nn.Module):
def __init__(self, rank):
super().__init__()
torch.manual_seed(0)
self.second = SecondTier(rank)
self.net = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
def forward(self, x):
raise NotImplementedError
class TestFSDPWithEP(DTensorTestBase, VerifyStateDictMixin):
@property
def world_size(self) -> int:
return min(8, torch.cuda.device_count())
@with_comms
@skip_if_lt_x_gpu(8)
@with_temp_dir
def test_e2e(self):
model = TopModel(self.rank).cuda()
mesh_fsdp_tp = init_device_mesh(
self.device_type, (2, 4), mesh_dim_names=("dp", "tp")
)
# TODO: we are using an internal API atm. Change to a public API once it is ready.
mesh_fsdp_ep = _mesh_resources.create_child_mesh(mesh_fsdp_tp, ("dp",))
del _mesh_resources.child_to_parent_mapping[mesh_fsdp_ep]
mesh_fsdp = init_device_mesh(self.device_type, (8,))
for i, l in enumerate(model.second.ep_layers):
model.second.ep_layers[i] = FSDP(
l, use_orig_params=True, device_mesh=mesh_fsdp_ep
)
model.second = FSDP(model.second, use_orig_params=True, device_mesh=mesh_fsdp)
model = FSDP(model, use_orig_params=True, device_mesh=mesh_fsdp)
optim = torch.optim.Adam(model.parameters(), lr=0.1)
msd, osd = get_state_dict(model, optim)
# FSDP only params
for key in (
"net.0.weight",
"net.0.bias",
"second.net.0.weight",
"second.net.0.bias",
):
msd_v = msd[key]
osd_v = osd["state"][key]["exp_avg"]
for v in (msd_v, osd_v):
self.assertTrue(isinstance(v, DTensor))
self.assertEqual(tuple(v.device_mesh.mesh), tuple(range(8)))
# FSDP/EP params
layer = self.rank % 4
ranks = (layer, layer + 4)
for i in range(4):
for key in (
f"second.ep_layers.{i}.net1.0.weight",
f"second.ep_layers.{i}.net1.0.bias",
f"second.ep_layers.{i}.net2.0.weight",
f"second.ep_layers.{i}.net2.0.bias",
):
if layer != i:
self.assertTrue(key not in msd)
else:
msd_v = msd[key]
osd_v = osd["state"][key]["exp_avg"]
for v in (msd_v, osd_v):
self.assertTrue(isinstance(v, DTensor))
self.assertEqual(tuple(v.device_mesh.mesh), ranks)
self.assertEqual(set(osd["state"].keys()), set(msd.keys()))
if __name__ == "__main__":
run_tests()
|
# Owner(s): ["oncall: distributed"]
import torch
import torch.nn as nn
from torch.distributed.checkpoint.state_dict import get_state_dict
from torch.distributed.device_mesh import _mesh_resources, init_device_mesh
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.tensor import DTensor
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorTestBase,
skip_if_lt_x_gpu,
with_comms,
)
from torch.testing._internal.distributed.checkpoint_utils import with_temp_dir
from torch.testing._internal.distributed.common_state_dict import VerifyStateDictMixin
class Dummymodel(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
raise NotImplementedError
class EPModel(nn.Module):
def __init__(self, rank):
super().__init__()
self.net1 = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
self.net2 = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
def forward(self, x):
raise NotImplementedError
class SecondTier(nn.Module):
def __init__(self, rank):
super().__init__()
self.ep_layers = nn.ModuleList(
[EPModel(rank) if rank % 4 == i else Dummymodel() for i in range(4)]
)
self.net = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
def forward(self, x):
raise NotImplementedError
class TopModel(nn.Module):
def __init__(self, rank):
super().__init__()
torch.manual_seed(0)
self.second = SecondTier(rank)
self.net = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
def forward(self, x):
raise NotImplementedError
class TestFSDPWithEP(DTensorTestBase, VerifyStateDictMixin):
@property
def world_size(self) -> int:
return min(8, torch.cuda.device_count())
@with_comms
@skip_if_lt_x_gpu(8)
@with_temp_dir
def test_e2e(self):
model = TopModel(self.rank).cuda()
mesh_fsdp_tp = init_device_mesh(
self.device_type, (2, 4), mesh_dim_names=("dp", "tp")
)
        # TODO: we are using an internal API atm. Change to a public API once it is ready.
mesh_fsdp_ep = _mesh_resources.create_child_mesh(mesh_fsdp_tp, ("dp",))
del _mesh_resources.child_to_parent_mapping[mesh_fsdp_ep]
mesh_fsdp = init_device_mesh(self.device_type, (8,))
for i, l in enumerate(model.second.ep_layers):
model.second.ep_layers[i] = FSDP(
l, use_orig_params=True, device_mesh=mesh_fsdp_ep
)
model.second = FSDP(model.second, use_orig_params=True, device_mesh=mesh_fsdp)
model = FSDP(model, use_orig_params=True, device_mesh=mesh_fsdp)
optim = torch.optim.Adam(model.parameters(), lr=0.1)
msd, osd = get_state_dict(model, optim)
# FSDP only params
for key in (
"net.0.weight",
"net.0.bias",
"second.net.0.weight",
"second.net.0.bias",
):
msd_v = msd[key]
osd_v = osd["state"][key]["exp_avg"]
for v in (msd_v, osd_v):
self.assertTrue(isinstance(v, DTensor))
self.assertEqual(tuple(v.device_mesh.mesh), tuple(range(8)))
# FSDP/EP params
layer = self.rank % 4
ranks = (layer, layer + 4)
for i in range(4):
for key in (
f"second.ep_layers.{i}.net1.0.weight",
f"second.ep_layers.{i}.net1.0.bias",
f"second.ep_layers.{i}.net2.0.weight",
f"second.ep_layers.{i}.net2.0.bias",
):
if layer != i:
self.assertTrue(key not in msd)
else:
msd_v = msd[key]
osd_v = osd["state"][key]["exp_avg"]
for v in (msd_v, osd_v):
self.assertTrue(isinstance(v, DTensor))
self.assertEqual(tuple(v.device_mesh.mesh), ranks)
self.assertEqual(set(osd["state"].keys()), set(msd.keys()))
if __name__ == "__main__":
run_tests()
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
from mmdet.utils import get_device
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
device = get_device()
self.seed = sync_random_seed(seed, device)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
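# Hedged, minimal illustration of the padding and subsampling arithmetic used in
# __iter__ above, on toy values (3 dataset indices, total_size=8, num_replicas=4,
# rank=1). This is not part of the sampler and the numbers are arbitrary.
if __name__ == '__main__':
    toy_indices = [0, 1, 2]
    total_size, num_replicas, rank = 8, 4, 1
    # repeat the index list ceil(8 / 3) = 3 times, then truncate to total_size
    padded = (toy_indices * math.ceil(total_size / len(toy_indices)))[:total_size]
    assert padded == [0, 1, 2, 0, 1, 2, 0, 1]
    # each rank takes every num_replicas-th entry, starting at its own rank
    assert padded[rank:total_size:num_replicas] == [1, 2]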
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
from mmdet.utils import get_device
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
device = get_device()
self.seed = sync_random_seed(seed, device)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
import pytest
from jina import Executor, Flow, requests
@pytest.fixture()
def get_executor():
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs): ...
return DummyExecutor
def test_disable_monitoring_on_pods(port_generator, get_executor):
port0 = port_generator()
port1 = port_generator()
f = Flow(monitoring=True, port_monitoring=port0).add(
uses=get_executor(),
port_monitoring=port1,
monitoring=False,
)
f = f.build()
assert f._deployment_nodes['gateway'].pod_args['pods'][0][0].monitoring
assert not f._deployment_nodes['executor0'].pod_args['pods'][0][0].monitoring
def test_disable_monitoring_on_gateway_only(port_generator, get_executor):
port0 = port_generator()
port1 = port_generator()
f = Flow(monitoring=False, port_monitoring=port0).add(
uses=get_executor(),
port_monitoring=port1,
monitoring=True,
)
f = f.build()
assert not f._deployment_nodes['gateway'].pod_args['pods'][0][0].monitoring
assert f._deployment_nodes['executor0'].pod_args['pods'][0][0].monitoring
|
import pytest
from jina import Executor, Flow, requests
@pytest.fixture()
def get_executor():
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
...
return DummyExecutor
def test_disable_monitoring_on_pods(port_generator, get_executor):
port0 = port_generator()
port1 = port_generator()
f = Flow(monitoring=True, port_monitoring=port0).add(
uses=get_executor(),
port_monitoring=port1,
monitoring=False,
)
f = f.build()
assert f._deployment_nodes['gateway'].pod_args['pods'][0][0].monitoring
assert not f._deployment_nodes['executor0'].pod_args['pods'][0][0].monitoring
def test_disable_monitoring_on_gateway_only(port_generator, get_executor):
port0 = port_generator()
port1 = port_generator()
f = Flow(monitoring=False, port_monitoring=port0).add(
uses=get_executor(),
port_monitoring=port1,
monitoring=True,
)
f = f.build()
assert not f._deployment_nodes['gateway'].pod_args['pods'][0][0].monitoring
assert f._deployment_nodes['executor0'].pod_args['pods'][0][0].monitoring
|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPALTE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(y1, 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPALTE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPALTE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(y1, 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPALTE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
"""DeepLake reader."""
from typing import List, Optional, Union
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
distance_metric_map = {
"l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2),
"l1": lambda a, b: np.linalg.norm(a - b, axis=1, ord=1),
"max": lambda a, b: np.linalg.norm(a - b, axis=1, ord=np.inf),
"cos": lambda a, b: np.dot(a, b.T)
/ (np.linalg.norm(a) * np.linalg.norm(b, axis=1)),
"dot": lambda a, b: np.dot(a, b.T),
}
def vector_search(
query_vector: Union[List, np.ndarray],
data_vectors: np.ndarray,
distance_metric: str = "l2",
limit: Optional[int] = 4,
) -> List:
"""
Naive search for nearest neighbors
args:
query_vector: Union[List, np.ndarray]
data_vectors: np.ndarray
limit (int): number of nearest neighbors
        distance_metric: distance function; 'l2' for Euclidean, 'l1' for Manhattan,
            'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product
returns:
nearest_indices: List, indices of nearest neighbors.
"""
# Calculate the distance between the query_vector and all data_vectors
if isinstance(query_vector, list):
query_vector = np.array(query_vector)
query_vector = query_vector.reshape(1, -1)
distances = distance_metric_map[distance_metric](query_vector, data_vectors)
nearest_indices = np.argsort(distances)
nearest_indices = (
nearest_indices[::-1][:limit]
if distance_metric in ["cos"]
else nearest_indices[:limit]
)
return nearest_indices.tolist()
class DeepLakeReader(BaseReader):
"""
DeepLake reader.
Retrieve documents from existing DeepLake datasets.
Args:
dataset_name: Name of the deeplake dataset.
"""
def __init__(
self,
token: Optional[str] = None,
):
"""Initializing the deepLake reader."""
import_err_msg = (
"`deeplake` package not found, please run `pip install deeplake`"
)
try:
import deeplake # noqa
except ImportError:
raise ImportError(import_err_msg)
self.token = token
def load_data(
self,
query_vector: List[float],
dataset_path: str,
limit: int = 4,
distance_metric: str = "l2",
) -> List[Document]:
"""
Load data from DeepLake.
Args:
dataset_name (str): Name of the DeepLake dataset.
query_vector (List[float]): Query vector.
limit (int): Number of results to return.
Returns:
List[Document]: A list of documents.
"""
import deeplake
from deeplake.util.exceptions import TensorDoesNotExistError
dataset = deeplake.load(dataset_path, token=self.token)
try:
embeddings = dataset.embedding.numpy(fetch_chunks=True)
except Exception:
raise TensorDoesNotExistError("embedding")
indices = vector_search(
query_vector, embeddings, distance_metric=distance_metric, limit=limit
)
documents = []
for idx in indices:
document = Document(
text=str(dataset[idx].text.numpy().tolist()[0]),
id_=dataset[idx].ids.numpy().tolist()[0],
)
documents.append(document)
return documents
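# Minimal, self-contained sketch of the naive `vector_search` helper above on random
# data. Purely illustrative: it does not touch DeepLake, and the shapes and seed are
# arbitrary assumptions.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.random((100, 8))  # 100 stored vectors of dimension 8
    query = rng.random(8)  # a single query vector
    top4 = vector_search(query, data, distance_metric="l2", limit=4)
    print(top4)  # indices of the 4 nearest stored vectors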
|
"""DeepLake reader."""
from typing import List, Optional, Union
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
distance_metric_map = {
"l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2),
"l1": lambda a, b: np.linalg.norm(a - b, axis=1, ord=1),
"max": lambda a, b: np.linalg.norm(a - b, axis=1, ord=np.inf),
"cos": lambda a, b: np.dot(a, b.T)
/ (np.linalg.norm(a) * np.linalg.norm(b, axis=1)),
"dot": lambda a, b: np.dot(a, b.T),
}
def vector_search(
query_vector: Union[List, np.ndarray],
data_vectors: np.ndarray,
distance_metric: str = "l2",
limit: Optional[int] = 4,
) -> List:
"""
Naive search for nearest neighbors
args:
query_vector: Union[List, np.ndarray]
data_vectors: np.ndarray
limit (int): number of nearest neighbors
        distance_metric: distance function; 'l2' for Euclidean, 'l1' for Manhattan,
            'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product
returns:
nearest_indices: List, indices of nearest neighbors.
"""
# Calculate the distance between the query_vector and all data_vectors
if isinstance(query_vector, list):
query_vector = np.array(query_vector)
query_vector = query_vector.reshape(1, -1)
distances = distance_metric_map[distance_metric](query_vector, data_vectors)
nearest_indices = np.argsort(distances)
nearest_indices = (
nearest_indices[::-1][:limit]
if distance_metric in ["cos"]
else nearest_indices[:limit]
)
return nearest_indices.tolist()
class DeepLakeReader(BaseReader):
"""
DeepLake reader.
Retrieve documents from existing DeepLake datasets.
Args:
dataset_name: Name of the deeplake dataset.
"""
def __init__(
self,
token: Optional[str] = None,
):
"""Initializing the deepLake reader."""
import_err_msg = (
"`deeplake` package not found, please run `pip install deeplake`"
)
try:
import deeplake # noqa
except ImportError:
raise ImportError(import_err_msg)
self.token = token
def load_data(
self,
query_vector: List[float],
dataset_path: str,
limit: int = 4,
distance_metric: str = "l2",
) -> List[Document]:
"""
Load data from DeepLake.
Args:
dataset_name (str): Name of the DeepLake dataset.
query_vector (List[float]): Query vector.
limit (int): Number of results to return.
Returns:
List[Document]: A list of documents.
"""
import deeplake
from deeplake.util.exceptions import TensorDoesNotExistError
dataset = deeplake.load(dataset_path, token=self.token)
try:
embeddings = dataset.embedding.numpy(fetch_chunks=True)
except Exception:
raise TensorDoesNotExistError("embedding")
indices = vector_search(
query_vector, embeddings, distance_metric=distance_metric, limit=limit
)
documents = []
for idx in indices:
document = Document(
text=str(dataset[idx].text.numpy().tolist()[0]),
id_=dataset[idx].ids.numpy().tolist()[0],
)
documents.append(document)
return documents
|
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sharding without ambiguity for users
lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
if len(set(lists_lengths.values())) > 1:
raise RuntimeError(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
)
max_length = max(lists_lengths.values(), default=0)
return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
"""
Get the range of shard indices per job.
If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.
The shards indices order is preserved: e.g. all the first shards are given the first job.
Moreover all the jobs are given approximately the same number of shards.
Example:
```python
>>> _distribute_shards(2, max_num_jobs=4)
[range(0, 1), range(1, 2)]
>>> _distribute_shards(10, max_num_jobs=3)
[range(0, 4), range(4, 7), range(7, 10)]
```
"""
shards_indices_per_group = []
for group_idx in range(max_num_jobs):
num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
shard_indices = range(start, start + num_shards_to_add)
shards_indices_per_group.append(shard_indices)
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> list[dict]:
"""Split the gen_kwargs into `max_num_job` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
if num_shards == 1:
return [dict(gen_kwargs)]
else:
shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(value, list)
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(shard_indices_per_group))
]
def _merge_gen_kwargs(gen_kwargs_list: list[dict]) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], list)
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
"""Return a shuffled copy of the input gen_kwargs"""
# We must shuffle all the lists, and lists of the same size must have the same shuffling.
# This way entangled lists of (shard, shard_metadata) are still in the right order.
# First, let's generate the shuffled indices per list size
list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
indices_per_size = {}
for size in list_sizes:
indices_per_size[size] = list(range(size))
rng.shuffle(indices_per_size[size])
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
shuffled_kwargs = dict(gen_kwargs)
for key, value in shuffled_kwargs.items():
if isinstance(value, list):
shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
return shuffled_kwargs
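# Hedged, minimal illustration of how the helpers above fit together, using toy
# gen_kwargs that are not part of the original module.
if __name__ == "__main__":
    gen_kwargs = {
        "files": ["a.txt", "b.txt", "c.txt"],
        "metadata": ["ma", "mb", "mc"],
        "split": "train",
    }
    assert _number_of_shards_in_gen_kwargs(gen_kwargs) == 3
    # 3 shards spread over at most 2 jobs -> ranges [0, 2) and [2, 3)
    assert _distribute_shards(num_shards=3, max_num_jobs=2) == [range(0, 2), range(2, 3)]
    splits = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    assert splits[0]["files"] == ["a.txt", "b.txt"] and splits[1]["files"] == ["c.txt"]
    # merging the per-job kwargs restores the original lists
    assert _merge_gen_kwargs(splits) == gen_kwargs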
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sharding without ambiguity for users
lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
if len(set(lists_lengths.values())) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
)
)
max_length = max(lists_lengths.values(), default=0)
return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
"""
Get the range of shard indices per job.
If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.
The shards indices order is preserved: e.g. all the first shards are given the first job.
Moreover all the jobs are given approximately the same number of shards.
Example:
```python
>>> _distribute_shards(2, max_num_jobs=4)
[range(0, 1), range(1, 2)]
>>> _distribute_shards(10, max_num_jobs=3)
[range(0, 4), range(4, 7), range(7, 10)]
```
"""
shards_indices_per_group = []
for group_idx in range(max_num_jobs):
num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
shard_indices = range(start, start + num_shards_to_add)
shards_indices_per_group.append(shard_indices)
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
"""Split the gen_kwargs into `max_num_job` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
if num_shards == 1:
return [dict(gen_kwargs)]
else:
shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(value, list)
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(shard_indices_per_group))
]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], list)
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
"""Return a shuffled copy of the input gen_kwargs"""
# We must shuffle all the lists, and lists of the same size must have the same shuffling.
# This way entangled lists of (shard, shard_metadata) are still in the right order.
# First, let's generate the shuffled indices per list size
list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
indices_per_size = {}
for size in list_sizes:
indices_per_size[size] = list(range(size))
rng.shuffle(indices_per_size[size])
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
shuffled_kwargs = dict(gen_kwargs)
for key, value in shuffled_kwargs.items():
if isinstance(value, list):
shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
return shuffled_kwargs
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
from docarray.utils._internal.pydantic import is_pydantic_v2
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
assert isinstance(img.bytes_, ImageBytes)
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert isinstance(img.bytes_, ImageBytes)
assert len(img.bytes_) > 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import ImageBytes
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDoc):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes_ = img.url.load_bytes()
assert isinstance(img.bytes_, ImageBytes)
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes_ = img.tensor.to_bytes()
assert isinstance(img.bytes_, bytes)
assert isinstance(img.bytes_, ImageBytes)
assert len(img.bytes_) > 0
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path).to(device).eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
        # traverse through all documents which have to be processed
flat_docs = docs.traverse_flat(traversal_paths)
# filter out documents without audio wav
filtered_docs = DocumentArray(
[doc for doc in flat_docs if doc.blob is not None]
)
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
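# A minimal usage sketch (illustrative only; assumes the pretrained checkpoint exists
# at the default path, and the waveform and sample rate below are dummy values):
#
#     import numpy as np
#     from jina import Document, DocumentArray
#
#     encoder = AudioCLIPEncoder(model_path='assets/AudioCLIP-Full-Training.pt')
#     doc = Document(blob=np.random.randn(16000).astype(np.float32))
#     doc.tags['sample_rate'] = 16000
#     encoder.encode(DocumentArray([doc]), parameters={})
#     # doc.embedding now holds the AudioCLIP audio embedding as a numpy array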
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs
):
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path).to(device).eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
        # traverse through all documents which have to be processed
flat_docs = docs.traverse_flat(traversal_paths)
# filter out documents without audio wav
filtered_docs = DocumentArray(
[doc for doc in flat_docs if doc.blob is not None]
)
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
            return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
def test_generic_init():
class Text(BaseDoc):
text: str
da = DocList[Text]([])
    assert da.doc_type == Text
assert isinstance(da, DocList)
def test_normal_access_init():
da = DocList([])
    assert da.doc_type == AnyDoc
assert isinstance(da, DocList)
|
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
def test_generic_init():
class Text(BaseDoc):
text: str
da = DocList[Text]([])
    assert da.doc_type == Text
assert isinstance(da, DocList)
def test_normal_access_init():
da = DocList([])
    assert da.doc_type == AnyDoc
assert isinstance(da, DocList)
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CEBinaryAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with a single output. It measures the
    accuracy of the predicted class vs. the gold labels. It uses a fixed threshold to determine the label (0 vs 1).
    See CEBinaryClassificationEvaluator for an evaluator that automatically determines the optimal threshold.
"""
def __init__(
self,
sentence_pairs: list[list[str]],
labels: list[int],
name: str = "",
threshold: float = 0.5,
write_csv: bool = True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.threshold = threshold
self.csv_file = "CEBinaryAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = pred_scores > self.threshold
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
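# A minimal usage sketch (illustrative only; the model path, data and output
# directory are placeholders, and a CrossEncoder with a single binary output is assumed):
#
#     from sentence_transformers import CrossEncoder
#
#     model = CrossEncoder("path/to/binary-cross-encoder")
#     examples = [InputExample(texts=["query", "relevant passage"], label=1),
#                 InputExample(texts=["query", "unrelated passage"], label=0)]
#     evaluator = CEBinaryAccuracyEvaluator.from_input_examples(examples, name="dev")
#     accuracy = evaluator(model, output_path="eval_output")  # existing directory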
|
import csv
import logging
import os
from typing import List
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CEBinaryAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with a single output. It measures the
    accuracy of the predicted class vs. the gold labels. It uses a fixed threshold to determine the label (0 vs 1).
    See CEBinaryClassificationEvaluator for an evaluator that automatically determines the optimal threshold.
"""
def __init__(
self,
sentence_pairs: List[List[str]],
labels: List[int],
name: str = "",
threshold: float = 0.5,
write_csv: bool = True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.threshold = threshold
self.csv_file = "CEBinaryAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = pred_scores > self.threshold
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
|
from __future__ import annotations
from sentence_transformers.similarity_functions import SimilarityFunction
__all__ = ["SimilarityFunction"]
|
from sentence_transformers.similarity_functions import SimilarityFunction
__all__ = ["SimilarityFunction"]
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"])
class AveragePooling3D(BasePooling):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and
width) by taking the average value over an input window (of size defined by
`pool_size`) for each channel of the input. The window is shifted by
`strides` along each dimension.
Args:
pool_size: int or tuple of 3 integers, factors by which to downscale
(dim1, dim2, dim3). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 3 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
channels = 3
inputs = keras.layers.Input(shape=(depth, height, width, channels))
layer = keras.layers.AveragePooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=3,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling3D", "keras.layers.AvgPool3D"])
class AveragePooling3D(BasePooling):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and
width) by taking the average value over an input window (of size defined by
`pool_size`) for each channel of the input. The window is shifted by
`strides` along each dimension.
Args:
pool_size: int or tuple of 3 integers, factors by which to downscale
(dim1, dim2, dim3). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 3 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
channels = 3
inputs = keras.layers.Input(shape=(depth, height, width, channels))
layer = keras.layers.AveragePooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=3,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
"""Test pydantic output parser."""
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.output_parsers.pydantic import PydanticOutputParser
from llama_index.core.llms import ChatMessage, TextBlock, ImageBlock
class AttrDict(BaseModel):
test_attr: str
foo: int
class TestModel(BaseModel):
__test__ = False
title: str
attr_dict: AttrDict
def test_pydantic() -> None:
"""Test pydantic output parser."""
output = """\
Here is the valid JSON:
{
"title": "TestModel",
"attr_dict": {
"test_attr": "test_attr",
"foo": 2
}
}
"""
parser = PydanticOutputParser(output_cls=TestModel)
parsed_output = parser.parse(output)
assert isinstance(parsed_output, TestModel)
assert parsed_output.title == "TestModel"
assert isinstance(parsed_output.attr_dict, AttrDict)
assert parsed_output.attr_dict.test_attr == "test_attr"
assert parsed_output.attr_dict.foo == 2
# TODO: figure out testing conditions
with pytest.raises(ValueError):
output = "hello world"
parsed_output = parser.parse(output)
def test_pydantic_format() -> None:
"""Test pydantic format."""
query = "hello world"
parser = PydanticOutputParser(output_cls=AttrDict)
formatted_query = parser.format(query)
assert "hello world" in formatted_query
def test_pydantic_format_with_blocks() -> None:
"""Test pydantic format with blocks."""
parser = PydanticOutputParser(output_cls=AttrDict)
messages = [
ChatMessage(
role="user",
blocks=[
TextBlock(text="hello world"),
ImageBlock(
url="https://pbs.twimg.com/media/GVhGD1PXkAANfPV?format=jpg&name=4096x4096"
),
TextBlock(text="hello world"),
],
)
]
formatted_messages = parser.format_messages(messages)
assert "hello world" in formatted_messages[0].blocks[-1].text
|
"""Test pydantic output parser."""
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.output_parsers.pydantic import PydanticOutputParser
class AttrDict(BaseModel):
test_attr: str
foo: int
class TestModel(BaseModel):
__test__ = False
title: str
attr_dict: AttrDict
def test_pydantic() -> None:
"""Test pydantic output parser."""
output = """\
Here is the valid JSON:
{
"title": "TestModel",
"attr_dict": {
"test_attr": "test_attr",
"foo": 2
}
}
"""
parser = PydanticOutputParser(output_cls=TestModel)
parsed_output = parser.parse(output)
assert isinstance(parsed_output, TestModel)
assert parsed_output.title == "TestModel"
assert isinstance(parsed_output.attr_dict, AttrDict)
assert parsed_output.attr_dict.test_attr == "test_attr"
assert parsed_output.attr_dict.foo == 2
# TODO: figure out testing conditions
with pytest.raises(ValueError):
output = "hello world"
parsed_output = parser.parse(output)
def test_pydantic_format() -> None:
"""Test pydantic format."""
query = "hello world"
parser = PydanticOutputParser(output_cls=AttrDict)
formatted_query = parser.format(query)
assert "hello world" in formatted_query
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='1111',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='1111',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]))
|
from urllib.parse import urlparse, urlunparse
import pytest
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.postprocessor.nvidia_rerank.utils import BASE_URL
import respx
@pytest.fixture()
def mock_v1_local_models2(respx_mock: respx.MockRouter, base_url: str) -> None:
parsed = urlparse(base_url)
normalized_path = parsed.path.rstrip("/")
if not normalized_path.endswith("/v1"):
normalized_path += "/v1"
base_url = urlunparse(
(parsed.scheme, parsed.netloc, normalized_path, None, None, None)
)
# Intercept GET call for retrieving models using httpx.
respx_mock.get(f"{base_url}/models").respond(
json={
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
},
]
}
)
# marking as skip because base_url validation is removed
@pytest.mark.skip(reason="base_url validation is removed")
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/rankings",
"http://0.0.0.0:8888/ranking",
"http://test_url/.../v1",
"https://test_url/.../v1",
],
)
def test_base_url_invalid_not_hosted(
base_url: str, mock_v1_local_models2: None
) -> None:
parsed = urlparse(base_url)
normalized_path = parsed.path.rstrip("/")
# Expect a warning if the URL does NOT already end with "/v1"
if not normalized_path.endswith("/v1"):
with pytest.warns(UserWarning, match="does not end in /v1"):
client = Interface(base_url=base_url)
else:
client = Interface(base_url=base_url)
# Assert that the client's base_url is normalized to end with '/v1'
assert client.base_url.endswith("/v1")
# Updated test for valid non-hosted URL.
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/v1",
],
)
def test_base_url_valid_not_hosted(base_url: str, mock_v1_local_models2: None) -> None:
# The default model warning is expected in non-hosted mode
with pytest.warns(UserWarning, match="Default model is set") as record:
client = Interface(base_url=base_url)
# Also verify the base_url remains normalized (unchanged in this case)
assert client.base_url.endswith("/v1")
# Updated test for hosted base URL.
@pytest.mark.parametrize(
"base_url",
[BASE_URL],
)
def test_base_url_valid_hosted(base_url: str, mock_v1_local_models2: None) -> None:
client = Interface(base_url=base_url, api_key="BOGUS")
assert client._is_hosted
# Hosted client should use the provided base_url exactly.
assert client.base_url == base_url
# Updated test for proxy base URLs.
@pytest.mark.parametrize(
"base_url",
[
"http://host/path0/path1/path2/v1",
"http://host:123/path0/path1/path2/v1",
],
)
def test_proxy_base_url(base_url: str, mock_v1_local_models2: None) -> None:
client = Interface(api_key="NO_API_KEY_PROVIDED", base_url=base_url)
assert not client._is_hosted
# Since the URL is already normalized, verify it remains unchanged.
assert client.base_url == base_url
# marking as skip because base_url validation is removed
@pytest.mark.skip(reason="base_url validation is removed")
@pytest.mark.parametrize(
"base_url",
[
"bogus",
"http:/",
"http://",
"http:/oops",
],
)
def test_param_base_url_negative(base_url: str, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
with pytest.raises(ValueError) as e:
Interface(model="model1", base_url=base_url)
assert "Invalid base_url" in str(e.value)
|
from urllib.parse import urlparse, urlunparse
import pytest
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.postprocessor.nvidia_rerank.utils import BASE_URL
import respx
@pytest.fixture()
def mock_v1_local_models2(respx_mock: respx.MockRouter, base_url: str) -> None:
parsed = urlparse(base_url)
normalized_path = parsed.path.rstrip("/")
if not normalized_path.endswith("/v1"):
normalized_path += "/v1"
base_url = urlunparse(
(parsed.scheme, parsed.netloc, normalized_path, None, None, None)
)
# Intercept GET call for retrieving models using httpx.
respx_mock.get(f"{base_url}/models").respond(
json={
"data": [
{
"id": "model1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "model1",
},
]
}
)
# Updated test for non-hosted URLs that may need normalization.
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/rankings",
"http://0.0.0.0:8888/ranking",
"http://test_url/.../v1",
"https://test_url/.../v1",
],
)
def test_base_url_invalid_not_hosted(
base_url: str, mock_v1_local_models2: None
) -> None:
parsed = urlparse(base_url)
normalized_path = parsed.path.rstrip("/")
# Expect a warning if the URL does NOT already end with "/v1"
if not normalized_path.endswith("/v1"):
with pytest.warns(UserWarning, match="does not end in /v1"):
client = Interface(base_url=base_url)
else:
client = Interface(base_url=base_url)
# Assert that the client's base_url is normalized to end with '/v1'
assert client.base_url.endswith("/v1")
# Updated test for valid non-hosted URL.
@pytest.mark.parametrize(
"base_url",
[
"http://0.0.0.0:8888/v1",
],
)
def test_base_url_valid_not_hosted(base_url: str, mock_v1_local_models2: None) -> None:
# The default model warning is expected in non-hosted mode
with pytest.warns(UserWarning, match="Default model is set") as record:
client = Interface(base_url=base_url)
# Also verify the base_url remains normalized (unchanged in this case)
assert client.base_url.endswith("/v1")
# Updated test for hosted base URL.
@pytest.mark.parametrize(
"base_url",
[BASE_URL],
)
def test_base_url_valid_hosted(base_url: str, mock_v1_local_models2: None) -> None:
client = Interface(base_url=base_url, api_key="BOGUS")
assert client._is_hosted
# Hosted client should use the provided base_url exactly.
assert client.base_url == base_url
# Updated test for proxy base URLs.
@pytest.mark.parametrize(
"base_url",
[
"http://host/path0/path1/path2/v1",
"http://host:123/path0/path1/path2/v1",
],
)
def test_proxy_base_url(base_url: str, mock_v1_local_models2: None) -> None:
client = Interface(api_key="NO_API_KEY_PROVIDED", base_url=base_url)
assert not client._is_hosted
# Since the URL is already normalized, verify it remains unchanged.
assert client.base_url == base_url
@pytest.mark.parametrize(
"base_url",
[
"bogus",
"http:/",
"http://",
"http:/oops",
],
)
def test_param_base_url_negative(base_url: str, monkeypatch) -> None:
monkeypatch.setenv("NVIDIA_API_KEY", "valid_api_key")
with pytest.raises(ValueError) as e:
Interface(model="model1", base_url=base_url)
assert "Invalid base_url" in str(e.value)
|
"""Standard LangChain interface tests"""
from pathlib import Path
from typing import Literal, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_anthropic import ChatAnthropic
REPO_ROOT_DIR = Path(__file__).parents[5]
class TestAnthropicStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-5-sonnet-latest"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_pdf_inputs(self) -> bool:
return True
@property
def supports_image_tool_message(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {
"invoke": ["cache_read_input", "cache_creation_input"],
"stream": ["cache_read_input", "cache_creation_input"],
}
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
# invoke twice so first invocation is cached
_invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
"""Standard LangChain interface tests"""
from pathlib import Path
from typing import Literal, cast
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_anthropic import ChatAnthropic
REPO_ROOT_DIR = Path(__file__).parents[5]
class TestAnthropicStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-5-sonnet-latest"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_tool_message(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {
"invoke": ["cache_read_input", "cache_creation_input"],
"stream": ["cache_read_input", "cache_creation_input"],
}
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620", # type: ignore[call-arg]
extra_headers={"anthropic-beta": "prompt-caching-2024-07-31"}, # type: ignore[call-arg]
)
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
# invoke twice so first invocation is cached
_invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
}
],
}
],
stream,
)
def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.core.bbox.coder import (DeltaXYWHBBoxCoder, TBLRBBoxCoder,
YOLOBBoxCoder)
def test_yolo_bbox_coder():
coder = YOLOBBoxCoder()
bboxes = torch.Tensor([[-42., -29., 74., 61.], [-10., -29., 106., 61.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
pred_bboxes = torch.Tensor([[0.4709, 0.6152, 0.1690, -0.4056],
[0.5399, 0.6653, 0.1162, -0.4162],
[0.4654, 0.6618, 0.1548, -0.4301],
[0.4786, 0.6197, 0.1896, -0.4479]])
grid_size = 32
expected_decode_bboxes = torch.Tensor(
[[-53.6102, -10.3096, 83.7478, 49.6824],
[-15.8700, -8.3901, 114.4236, 50.9693],
[11.1822, -8.0924, 146.6034, 50.4476],
[41.2068, -8.9232, 181.4236, 48.5840]])
assert expected_decode_bboxes.allclose(
coder.decode(bboxes, pred_bboxes, grid_size))
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
# test add_ctr_clamp
coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
def test_tblr_bbox_coder():
coder = TBLRBBoxCoder(normalizer=15.)
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.5000, 0.5000, 0.5000, 0.5000],
[0.0000, 0.0000, 12.0000, 13.0000],
[0.0000, 0.5000, 0.0000, 0.5000],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(13, 12))
assert expected_decode_bboxes.allclose(out)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((13, 12)))
assert expected_decode_bboxes.allclose(out)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(13, 12))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(13, 12), (13, 12)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(batch_rois, batch_deltas, max_shape=[(13, 12)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
|
import pytest
import torch
from mmdet.core.bbox.coder import (DeltaXYWHBBoxCoder, TBLRBBoxCoder,
YOLOBBoxCoder)
def test_yolo_bbox_coder():
coder = YOLOBBoxCoder()
bboxes = torch.Tensor([[-42., -29., 74., 61.], [-10., -29., 106., 61.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
pred_bboxes = torch.Tensor([[0.4709, 0.6152, 0.1690, -0.4056],
[0.5399, 0.6653, 0.1162, -0.4162],
[0.4654, 0.6618, 0.1548, -0.4301],
[0.4786, 0.6197, 0.1896, -0.4479]])
grid_size = 32
expected_decode_bboxes = torch.Tensor(
[[-53.6102, -10.3096, 83.7478, 49.6824],
[-15.8700, -8.3901, 114.4236, 50.9693],
[11.1822, -8.0924, 146.6034, 50.4476],
[41.2068, -8.9232, 181.4236, 48.5840]])
assert expected_decode_bboxes.allclose(
coder.decode(bboxes, pred_bboxes, grid_size))
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
# test add_ctr_clamp
coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
def test_tblr_bbox_coder():
coder = TBLRBBoxCoder(normalizer=15.)
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.5000, 0.5000, 0.5000, 0.5000],
[0.0000, 0.0000, 12.0000, 13.0000],
[0.0000, 0.5000, 0.0000, 0.5000],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(13, 12))
assert expected_decode_bboxes.allclose(out)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((13, 12)))
assert expected_decode_bboxes.allclose(out)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(13, 12))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(13, 12), (13, 12)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(batch_rois, batch_deltas, max_shape=[(13, 12)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for traceback_utils."""
import sys
import traceback
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import traceback_utils
class TracebackUtilsTest(test.TestCase):
def assert_trace_line_count(self, fn, count, filtering_enabled=True):
trace_line_count = -1
if filtering_enabled:
traceback_utils.enable_traceback_filtering()
else:
traceback_utils.disable_traceback_filtering()
self.assertEqual(
traceback_utils.is_traceback_filtering_enabled(), filtering_enabled)
try:
fn()
except Exception as e: # pylint: disable=broad-except
# We must count lines rather than frames because autograph transforms
# stack frames into a single large string
trace = '\n'.join(traceback.format_tb(e.__traceback__))
trace_line_count = len(trace.split('\n'))
self.assertGreater(trace_line_count, 0)
if filtering_enabled:
if sys.version_info >= (3, 13):
self.assertLessEqual(trace_line_count, count)
else:
self.assertLess(trace_line_count, count)
else:
self.assertGreater(trace_line_count, count)
def test_eager_add(self):
def fn():
x = array_ops.zeros((2, 3))
y = array_ops.zeros((2, 4))
_ = x + y
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_tfn_add(self):
@def_function.function
def fn():
x = array_ops.zeros((2, 3))
y = array_ops.zeros((2, 4))
return x + y
self.assert_trace_line_count(fn, count=10, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_tfn_div(self):
@def_function.function
def wrapped_fn(x):
return x / 0.
def fn():
wrapped_fn(0.5)
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
def test_eager_argmax(self):
def fn():
_ = math_ops.argmax([0, 1], axis=2)
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
def test_tfn_argmax(self):
@def_function.function
def wrapped_fn(x):
return math_ops.argmax(x, axis=2)
def fn():
wrapped_fn([0, 1])
if sys.version_info >= (3, 13):
self.assert_trace_line_count(fn, count=16, filtering_enabled=True)
else:
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_variable_constructor(self):
def fn():
_ = variables.Variable()
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for traceback_utils."""
import traceback
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import traceback_utils
class TracebackUtilsTest(test.TestCase):
def assert_trace_line_count(self, fn, count, filtering_enabled=True):
trace_line_count = -1
if filtering_enabled:
traceback_utils.enable_traceback_filtering()
else:
traceback_utils.disable_traceback_filtering()
self.assertEqual(
traceback_utils.is_traceback_filtering_enabled(), filtering_enabled)
try:
fn()
except Exception as e: # pylint: disable=broad-except
# We must count lines rather than frames because autograph transforms
# stack frames into a single large string
trace = '\n'.join(traceback.format_tb(e.__traceback__))
trace_line_count = len(trace.split('\n'))
self.assertGreater(trace_line_count, 0)
if filtering_enabled:
self.assertLess(trace_line_count, count)
else:
self.assertGreater(trace_line_count, count)
def test_eager_add(self):
def fn():
x = array_ops.zeros((2, 3))
y = array_ops.zeros((2, 4))
_ = x + y
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_tfn_add(self):
@def_function.function
def fn():
x = array_ops.zeros((2, 3))
y = array_ops.zeros((2, 4))
return x + y
self.assert_trace_line_count(fn, count=10, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_tfn_div(self):
@def_function.function
def wrapped_fn(x):
return x / 0.
def fn():
wrapped_fn(0.5)
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
def test_eager_argmax(self):
def fn():
_ = math_ops.argmax([0, 1], axis=2)
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
def test_tfn_argmax(self):
@def_function.function
def wrapped_fn(x):
return math_ops.argmax(x, axis=2)
def fn():
wrapped_fn([0, 1])
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=25, filtering_enabled=False)
def test_variable_constructor(self):
def fn():
_ = variables.Variable()
self.assert_trace_line_count(fn, count=15, filtering_enabled=True)
self.assert_trace_line_count(fn, count=30, filtering_enabled=False)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseMSEEvaluator(MSEEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
):
super().__init__(
source_sentences=source_sentences,
target_sentences=target_sentences,
teacher_model=teacher_model,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
logger.warning(
"The SparseMSEEvaluator is not handling the mse compute with sparse tensors yet. Memory issues may occur."
)
def __call__(
self,
model: SparseEncoder,
output_path: str = None,
epoch: int = -1,
steps: int = -1,
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=False,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self,
model: SparseEncoder,
metrics: dict[str, Any],
epoch: int = 0,
step: int = 0,
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
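# A minimal usage sketch (illustrative only; model paths and sentences are placeholders):
#
#     from sentence_transformers.sparse_encoder import SparseEncoder
#
#     student = SparseEncoder("path/to/sparse-student-model")
#     teacher = SparseEncoder("path/to/sparse-teacher-model")
#     evaluator = SparseMSEEvaluator(
#         source_sentences=["A sentence in English."],
#         target_sentences=["A translation of the sentence."],
#         teacher_model=teacher,
#         name="dev",
#     )
#     results = evaluator(student)  # dict of MSE-based metrics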
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseMSEEvaluator(MSEEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
):
super().__init__(
source_sentences=source_sentences,
target_sentences=target_sentences,
teacher_model=teacher_model,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
logger.warning(
"The SparseMSEEvaluator is not handling the mse compute with sparse tensors yet. Memory issues may occur."
)
def __call__(
self,
model: SparseEncoder,
output_path: str = None,
epoch: int = -1,
steps: int = -1,
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=False,
**kwargs,
)
def store_metrics_in_model_card_data(
self,
model: SparseEncoder,
metrics: dict[str, Any],
epoch: int = 0,
step: int = 0,
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (3, ) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
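# Example invocation (illustrative only; the script location and config path are
# assumptions and depend on the local mmdetection checkout):
#
#     python tools/get_flops.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#         --shape 1280 800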
|
import argparse
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (3, ) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self) -> None:
"""Initialize the weights."""
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedLinear`."""
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
power (int, optional): Power term. Defaults to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Defaults to False.
"""
def __init__(self,
*args,
tempearture: float = 20,
power: int = 1.0,
eps: float = 1e-6,
norm_over_kernel: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x: Tensor) -> Tensor:
"""Forward function for `NormedConv2d`."""
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
if torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
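# A minimal sketch of the normalization behaviour (illustrative only; the shapes
# and keyword spelling follow the class definitions above):
#
#     layer = NormedLinear(8, 4, tempearture=20)
#     x = torch.randn(2, 8)
#     out = layer(x)
#     # each weight row and each input row is divided by its L2 norm (raised to
#     # `power`), and the normalized input is scaled by the temperature before
#     # the usual linear projection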
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
        power (int, optional): Power term. Defaults to 1.0.
        eps (float, optional): The minimal value of divisor to
            keep numerical stability. Defaults to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):
super(NormedLinear, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x):
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
        power (int, optional): Power term. Defaults to 1.0.
        eps (float, optional): The minimal value of divisor to
            keep numerical stability. Defaults to 1e-6.
        norm_over_kernel (bool, optional): Normalize over kernel.
            Defaults to False.
"""
def __init__(self,
*args,
tempearture=20,
power=1.0,
eps=1e-6,
norm_over_kernel=False,
**kwargs):
super(NormedConv2d, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x):
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
if torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_tta import DetTTAModel
from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_results,
merge_aug_scores)
__all__ = [
'merge_aug_bboxes', 'merge_aug_masks', 'merge_aug_proposals',
'merge_aug_scores', 'merge_aug_results', 'DetTTAModel'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
merge_aug_proposals, merge_aug_results,
merge_aug_scores)
__all__ = [
'merge_aug_bboxes', 'merge_aug_masks', 'merge_aug_proposals',
'merge_aug_scores', 'merge_aug_results'
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from itertools import groupby
from typing import Dict, Iterable
from jina import DocumentArray, Executor, requests
class SimpleRanker(Executor):
"""
:class:`SimpleRanker` aggregates the score of the matched doc from the
matched chunks. For each matched doc, the score is aggregated from all the
matched chunks belonging to that doc. The score of the document is the minimum
score (min distance) among the chunks. The aggregated matches are sorted by
score (ascending).
"""
def __init__(
self,
metric: str = 'cosine',
ranking: str = 'min',
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs
):
"""
:param metric: the distance metric used in `scores`
        :param ranking: The ranking function that the executor uses. There are multiple
options:
- min: Select minimum score/distance and sort by minimum
- max: Select maximum score/distance and sort by maximum
- mean_min: Calculate mean score/distance and sort by minimum mean
- mean_max: Calculate mean score/distance and sort by maximum mean
:param default_traversal_paths: traverse path on docs, e.g. ['r'], ['c']
"""
super().__init__(*args, **kwargs)
self.metric = metric
assert ranking in ['min', 'max', 'mean_min', 'mean_max']
self.ranking = ranking
self.default_traversal_paths = default_traversal_paths
@requests(on='/search')
def rank(self, docs: DocumentArray, parameters: Dict, *args, **kwargs):
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
matches_of_chunks = []
for chunk in doc.chunks:
matches_of_chunks.extend(chunk.matches)
groups = groupby(
sorted(matches_of_chunks, key=lambda d: d.parent_id),
lambda d: d.parent_id,
)
for key, group in groups:
chunk_match_list = list(group)
if self.ranking == 'min':
chunk_match_list.sort(key=lambda m: m.scores[self.metric].value)
elif self.ranking == 'max':
chunk_match_list.sort(key=lambda m: -m.scores[self.metric].value)
match = chunk_match_list[0]
match.id = chunk_match_list[0].parent_id
if self.ranking in ['mean_min', 'mean_max']:
scores = [el.scores[self.metric].value for el in chunk_match_list]
match.scores[self.metric] = sum(scores) / len(scores)
doc.matches.append(match)
if self.ranking in ['min', 'mean_min']:
doc.matches.sort(key=lambda d: d.scores[self.metric].value)
elif self.ranking in ['max', 'mean_max']:
doc.matches.sort(key=lambda d: -d.scores[self.metric].value)
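# --- Hedged usage sketch (not part of the original executor) ---
# Two chunks of one query document each match chunks that belong to the same
# two parent documents; with ranking='min' the smallest distance per parent
# wins and the parents are sorted ascending. This assumes the docarray v1 /
# Jina 3 Document API, where assigning a float to `doc.scores['cosine']` is
# coerced into a NamedScore; all ids and distances are made up.
if __name__ == '__main__':
    from jina import Document

    def _match(parent_id, dist):
        m = Document()
        m.parent_id = parent_id
        m.scores['cosine'] = dist
        return m

    query = Document(chunks=[Document(), Document()])
    query.chunks[0].matches.extend([_match('doc_a', 0.2), _match('doc_b', 0.5)])
    query.chunks[1].matches.extend([_match('doc_a', 0.4), _match('doc_b', 0.1)])
    SimpleRanker(ranking='min').rank(DocumentArray([query]), parameters={})
    print([(m.id, m.scores['cosine'].value) for m in query.matches])
    # expected order: ('doc_b', 0.1) then ('doc_a', 0.2)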
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from itertools import groupby
from typing import Dict, Iterable
from jina import DocumentArray, Executor, requests
class SimpleRanker(Executor):
"""
:class:`SimpleRanker` aggregates the score of the matched doc from the
matched chunks. For each matched doc, the score is aggregated from all the
matched chunks belonging to that doc. The score of the document is the minimum
score (min distance) among the chunks. The aggregated matches are sorted by
score (ascending).
:param metric: the distance metric used in `scores`
    :param ranking: The ranking function that the executor uses. There are multiple
options:
- min: Select minimum score/distance and sort by minimum
- max: Select maximum score/distance and sort by maximum
- mean_min: Calculate mean score/distance and sort by minimum mean
- mean_max: Calculate mean score/distance and sort by maximum mean
:param default_traversal_paths: traverse path on docs, e.g. ['r'], ['c']
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
metric: str = 'cosine',
ranking: str = 'min',
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.metric = metric
assert ranking in ['min', 'max', 'mean_min', 'mean_max']
self.ranking = ranking
self.default_traversal_paths = default_traversal_paths
@requests(on='/search')
def rank(self, docs: DocumentArray, parameters: Dict, *args, **kwargs):
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
matches_of_chunks = []
for chunk in doc.chunks:
matches_of_chunks.extend(chunk.matches)
groups = groupby(
sorted(matches_of_chunks, key=lambda d: d.parent_id),
lambda d: d.parent_id,
)
for key, group in groups:
chunk_match_list = list(group)
if self.ranking == 'min':
chunk_match_list.sort(key=lambda m: m.scores[self.metric].value)
elif self.ranking == 'max':
chunk_match_list.sort(key=lambda m: -m.scores[self.metric].value)
match = chunk_match_list[0]
match.id = chunk_match_list[0].parent_id
if self.ranking in ['mean_min', 'mean_max']:
scores = [el.scores[self.metric].value for el in chunk_match_list]
match.scores[self.metric] = sum(scores) / len(scores)
doc.matches.append(match)
if self.ranking in ['min', 'mean_min']:
doc.matches.sort(key=lambda d: d.scores[self.metric].value)
elif self.ranking in ['max', 'mean_max']:
doc.matches.sort(key=lambda d: -d.scores[self.metric].value)
|
from __future__ import annotations
from dataclasses import field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "score"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
batch[column_name] = [row[column_name] for row in features]
return batch
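# --- Hedged usage sketch (not part of the original module) ---
# For a tiny pairwise dataset the collator turns the label column into a
# tensor under "label" and passes the remaining text columns through as plain
# lists (tokenization happens later, inside the loss/model). `tokenize_fn` is
# required by the parent dataclass but is not used in this __call__, so a
# placeholder is fine for illustration; the column names are made up.
if __name__ == "__main__":
    collator = CrossEncoderDataCollator(tokenize_fn=lambda texts: texts)
    features = [
        {"query": "how tall is everest", "answer": "8849 metres", "label": 1},
        {"query": "how tall is everest", "answer": "a mountain in asia", "label": 0},
    ]
    batch = collator(features)
    print(batch["label"])  # tensor([1, 0])
    print(batch["query"])  # ['how tall is everest', 'how tall is everest']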
|
from __future__ import annotations
from dataclasses import field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "score"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# TODO:
# if tuple(column_names) not in self._warned_columns:
# self.maybe_warn_about_column_order(column_names)
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
batch[column_name] = [row[column_name] for row in features]
return batch
|
_base_ = 'faster-rcnn_r50-caffe-dc5_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
_base_ = 'faster-rcnn_r50-caffe-dc5_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
"""Test simple function agent."""
from typing import Any, Dict, Tuple
import pytest
from llama_index.core.agent.custom.simple_function import FnAgentWorker
def mock_foo_fn_no_state_param() -> Tuple[None, bool]:
"""Mock agent input function without a state."""
return None, True
def mock_foo_fn(state: dict) -> Tuple[Dict[str, Any], bool]:
"""Mock agent input function."""
if "max_count" not in state:
raise ValueError("max_count must be specified.")
if "input" not in state:
state["input"] = state["__task__"].input
state["count"] = 0
is_done = False
else:
state["input"] = state["input"] + ":foo"
state["count"] += 1
is_done = state["count"] >= state["max_count"]
state["__output__"] = state["input"]
return state, is_done
async def async_mock_foo_fn(state: dict) -> Tuple[Dict[str, Any], bool]:
"""Mock async agent input function."""
return mock_foo_fn(state)
def test_fn_agent() -> None:
"""Test function agent."""
agent = FnAgentWorker(fn=mock_foo_fn, initial_state={"max_count": 5}).as_agent()
response = agent.query("hello")
assert str(response) == "hello:foo:foo:foo:foo:foo"
with pytest.raises(ValueError):
agent = FnAgentWorker(fn=mock_foo_fn).as_agent()
response = agent.query("hello")
def test_fn_agent_init() -> None:
"""Test function agent init."""
with pytest.raises(ValueError) as error_info:
FnAgentWorker(fn=mock_foo_fn_no_state_param).as_agent()
assert (
str(error_info.value)
== "StatefulFnComponent must have 'state' as required parameters"
)
@pytest.mark.asyncio
async def test_run_async_step() -> None:
"""Test run async step."""
agent_without_async_fn = FnAgentWorker(
        fn=mock_foo_fn, async_fn=None, initial_state={"max_count": 5}
).as_agent()
response = await agent_without_async_fn.aquery("hello")
assert str(response) == "hello:foo:foo:foo:foo:foo"
agent_with_async_fn = FnAgentWorker(
fn=mock_foo_fn, async_fn=async_mock_foo_fn, initial_state={"max_count": 5}
).as_agent()
response = await agent_with_async_fn.aquery("hello")
assert str(response) == "hello:foo:foo:foo:foo:foo"
|
"""Test simple function agent."""
from typing import Any, Dict, Tuple
import pytest
from llama_index.core.agent.custom.simple_function import FnAgentWorker
def mock_foo_fn_no_state_param() -> Tuple[None, bool]:
"""Mock agent input function without a state."""
return None, True
def mock_foo_fn(state: dict) -> Tuple[Dict[str, Any], bool]:
"""Mock agent input function."""
if "max_count" not in state:
raise ValueError("max_count must be specified.")
if "input" not in state:
state["input"] = state["__task__"].input
state["count"] = 0
is_done = False
else:
state["input"] = state["input"] + ":foo"
state["count"] += 1
is_done = state["count"] >= state["max_count"]
state["__output__"] = state["input"]
return state, is_done
async def async_mock_foo_fn(state: dict) -> Tuple[Dict[str, Any], bool]:
"""Mock async agent input function."""
return mock_foo_fn(state)
def test_fn_agent() -> None:
"""Test function agent."""
agent = FnAgentWorker(fn=mock_foo_fn, initial_state={"max_count": 5}).as_agent()
response = agent.query("hello")
assert str(response) == "hello:foo:foo:foo:foo:foo"
with pytest.raises(ValueError):
agent = FnAgentWorker(fn=mock_foo_fn).as_agent()
response = agent.query("hello")
def test_fn_agent_init() -> None:
"""Test function agent init."""
with pytest.raises(ValueError) as error_info:
FnAgentWorker(fn=mock_foo_fn_no_state_param).as_agent()
assert (
str(error_info.value)
== "StatefulFnComponent must have 'state' as required parameters"
)
@pytest.mark.asyncio()
async def test_run_async_step() -> None:
"""Test run async step."""
agent_without_async_fn = FnAgentWorker(
        fn=mock_foo_fn, async_fn=None, initial_state={"max_count": 5}
).as_agent()
response = await agent_without_async_fn.aquery("hello")
assert str(response) == "hello:foo:foo:foo:foo:foo"
agent_with_async_fn = FnAgentWorker(
fn=mock_foo_fn, async_fn=async_mock_foo_fn, initial_state={"max_count": 5}
).as_agent()
response = await agent_with_async_fn.aquery("hello")
assert str(response) == "hello:foo:foo:foo:foo:foo"
|
import platform
import sys
from pathlib import Path
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux") and platform.machine() == "x86_64":
requirements.append("triton>=2.0.0")
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.8",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=[
str(r)
for r in pkg_resources.parse_requirements(
Path(__file__).with_name("requirements.txt").open()
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
|
import platform
import sys
from pathlib import Path
import pkg_resources
from setuptools import find_packages, setup
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux") and platform.machine() == "x86_64":
requirements.append("triton>=2.0.0,<3")
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.8",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=[
str(r)
for r in pkg_resources.parse_requirements(
Path(__file__).with_name("requirements.txt").open()
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of [`NdArray`][docarray.typing.NdArray], to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
# doc_2.video_tensor.save(file_path='/tmp/file_2.mp4')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of NdArray, to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_1.video_tensor.save(file_path='/tmp/file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='/tmp/file_1.mp4',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
doc_2.video_tensor.save(file_path='/tmp/file_2.mp4')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
import hashlib
import secrets
from typing import NamedTuple
class APIKeyContainer(NamedTuple):
"""Container for API key parts."""
raw: str
prefix: str
postfix: str
hash: str
class APIKeyManager:
PREFIX: str = "agpt_"
PREFIX_LENGTH: int = 8
POSTFIX_LENGTH: int = 8
def generate_api_key(self) -> APIKeyContainer:
"""Generate a new API key with all its parts."""
raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
return APIKeyContainer(
raw=raw_key,
prefix=raw_key[: self.PREFIX_LENGTH],
postfix=raw_key[-self.POSTFIX_LENGTH :],
hash=hashlib.sha256(raw_key.encode()).hexdigest(),
)
def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
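# --- Hedged usage sketch (not part of the original module) ---
# Typical round trip: generate a key, keep only the hash (plus prefix/postfix
# for display) on the server, hand the raw key to the user once, and later
# verify a presented key against the stored hash.
if __name__ == "__main__":
    manager = APIKeyManager()
    key = manager.generate_api_key()
    stored_hash = key.hash  # the only secret-derived value the server keeps
    print(key.prefix, "...", key.postfix)  # safe to show in a UI
    assert manager.verify_api_key(key.raw, stored_hash)
    assert not manager.verify_api_key("agpt_not-the-right-key", stored_hash)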
|
from typing import NamedTuple
import secrets
import hashlib
class APIKeyContainer(NamedTuple):
"""Container for API key parts."""
raw: str
prefix: str
postfix: str
hash: str
class APIKeyManager:
PREFIX: str = "agpt_"
PREFIX_LENGTH: int = 8
POSTFIX_LENGTH: int = 8
def generate_api_key(self) -> APIKeyContainer:
"""Generate a new API key with all its parts."""
raw_key = f"{self.PREFIX}{secrets.token_urlsafe(32)}"
return APIKeyContainer(
raw=raw_key,
prefix=raw_key[:self.PREFIX_LENGTH],
postfix=raw_key[-self.POSTFIX_LENGTH:],
hash=hashlib.sha256(raw_key.encode()).hexdigest()
)
def verify_api_key(self, provided_key: str, stored_hash: str) -> bool:
"""Verify if a provided API key matches the stored hash."""
if not provided_key.startswith(self.PREFIX):
return False
return hashlib.sha256(provided_key.encode()).hexdigest() == stored_hash
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; seems to have no effect, the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.20.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
    in parallel plot generators against the Ubuntu default of ``ulimit -n 1024`` or the
    OS X El Capitan default of 256. This is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; seems to have no effect, the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.20.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful when running matplotlib/seaborn
    in parallel plot generators against the Ubuntu default of ``ulimit -n 1024`` or the
    OS X El Capitan default of 256. This is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, objectnesses = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_pos_loss = empty_gt_losses['loss_pos']
empty_neg_loss = empty_gt_losses['loss_neg']
empty_center_loss = empty_gt_losses['loss_center']
assert empty_neg_loss.item() > 0, 'cls loss should be non-zero'
assert empty_pos_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_center_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_pos_loss = one_gt_losses['loss_pos']
onegt_neg_loss = one_gt_losses['loss_neg']
onegt_center_loss = one_gt_losses['loss_center']
assert onegt_pos_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_neg_loss.item() > 0, 'box loss should be non-zero'
assert onegt_center_loss.item() > 0, 'box loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
strides=(4, ))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, objectnesses = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_pos_loss = empty_gt_losses['loss_pos']
empty_neg_loss = empty_gt_losses['loss_neg']
empty_center_loss = empty_gt_losses['loss_center']
assert empty_neg_loss.item() > 0, 'cls loss should be non-zero'
assert empty_pos_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_center_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_pos_loss = one_gt_losses['loss_pos']
onegt_neg_loss = one_gt_losses['loss_neg']
onegt_center_loss = one_gt_losses['loss_center']
assert onegt_pos_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_neg_loss.item() > 0, 'box loss should be non-zero'
assert onegt_center_loss.item() > 0, 'box loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
# TODO: Consider the naming of this class
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.ce_loss = nn.CrossEntropyLoss(**kwargs)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
logits = self.activation_fct(logits)
loss = self.ce_loss(logits, labels)
return loss
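# --- Hedged usage sketch (not part of the original module) ---
# Shows the expected input layout: `inputs` is a list with exactly two text
# columns (e.g. queries and candidate passages) and `labels` holds one class
# index per pair. The checkpoint name and num_labels below are arbitrary
# assumptions for illustration; any sequence-classification checkpoint that
# CrossEncoder can load should behave the same way.
if __name__ == "__main__":
    import torch

    model = CrossEncoder("distilbert-base-uncased", num_labels=3)
    loss_fct = CrossEntropyLoss(model)
    queries = ["how tall is everest", "who wrote hamlet"]
    passages = ["everest is 8849 m high", "hamlet was written by shakespeare"]
    labels = torch.tensor([0, 2])  # one class index per (query, passage) pair
    print(loss_fct([queries, passages], labels))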
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
# TODO: Consider the naming of this class
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder) -> None:
super().__init__()
self.model = model
self.ce_loss = nn.CrossEntropyLoss()
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
loss = self.ce_loss(logits, labels)
return loss
|
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
|
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
|
import torch
import torchaudio.prototype.functional as F
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
|
from typing import List, Union
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
from typing import Union, List
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.3.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
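# --- Hedged worked example (not part of the original module) ---
# How the parser splits release and pre-release version strings:
if __name__ == '__main__':
    assert parse_version_info('3.3.0') == (3, 3, 0)
    assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')
    print(version_info)  # (3, 3, 0) for the current __version__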
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.2.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.11.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.10.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import time
import pytest
from backend.util.decorator import async_error_logged, error_logged, time_measured
@time_measured
def example_function(a: int, b: int, c: int) -> int:
time.sleep(0.5)
return a + b + c
@error_logged(swallow=True)
def example_function_with_error_swallowed(a: int, b: int, c: int) -> int:
raise ValueError("This error should be swallowed")
@error_logged(swallow=False)
def example_function_with_error_not_swallowed(a: int, b: int, c: int) -> int:
raise ValueError("This error should NOT be swallowed")
@async_error_logged(swallow=True)
async def async_function_with_error_swallowed() -> int:
raise ValueError("This async error should be swallowed")
@async_error_logged(swallow=False)
async def async_function_with_error_not_swallowed() -> int:
raise ValueError("This async error should NOT be swallowed")
def test_timer_decorator():
"""Test that the time_measured decorator correctly measures execution time."""
info, res = example_function(1, 2, 3)
assert info.cpu_time >= 0
assert info.wall_time >= 0.4
assert res == 6
def test_error_decorator_swallow_true():
"""Test that error_logged(swallow=True) logs and swallows errors."""
res = example_function_with_error_swallowed(1, 2, 3)
assert res is None
def test_error_decorator_swallow_false():
"""Test that error_logged(swallow=False) logs errors but re-raises them."""
with pytest.raises(ValueError, match="This error should NOT be swallowed"):
example_function_with_error_not_swallowed(1, 2, 3)
def test_async_error_decorator_swallow_true():
"""Test that async_error_logged(swallow=True) logs and swallows errors."""
import asyncio
async def run_test():
res = await async_function_with_error_swallowed()
return res
res = asyncio.run(run_test())
assert res is None
def test_async_error_decorator_swallow_false():
"""Test that async_error_logged(swallow=False) logs errors but re-raises them."""
import asyncio
async def run_test():
await async_function_with_error_not_swallowed()
with pytest.raises(ValueError, match="This async error should NOT be swallowed"):
asyncio.run(run_test())
|
import time
from backend.util.decorator import error_logged, time_measured
@time_measured
def example_function(a: int, b: int, c: int) -> int:
time.sleep(0.5)
return a + b + c
@error_logged
def example_function_with_error(a: int, b: int, c: int) -> int:
raise ValueError("This is a test error")
def test_timer_decorator():
info, res = example_function(1, 2, 3)
assert info.cpu_time >= 0
assert info.wall_time >= 0.4
assert res == 6
def test_error_decorator():
res = example_function_with_error(1, 2, 3)
assert res is None
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocArray
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocArrays in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocArray names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocArray object at the specified name
:param name: The name of the DocArray to delete
:param missing_ok: If true, no error will be raised if the DocArray does not exist.
:return: True if the DocArray was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
da: 'DocArray',
name: str,
public: bool,
show_progress: bool,
branding: Optional[Dict],
) -> Dict:
"""Push this DocArray to the specified name.
:param da: The DocArray to push
:param name: The name to push to
:param public: Whether the DocArray should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocArray
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
public: bool = True,
show_progress: bool = False,
branding: Optional[Dict] = None,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param public: Whether the DocArray should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocArray
"""
...
@staticmethod
@abstractmethod
def pull(
da_cls: Type['DocArray'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocArray':
"""Pull a DocArray from the specified name.
:param da_cls: The DocArray class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocArray will be cached locally
:return: A DocArray
"""
...
@staticmethod
@abstractmethod
def pull_stream(
da_cls: Type['DocArray'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param da_cls: The DocArray class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocArray will be cached locally
:return: An iterator of documents"""
...
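

# A minimal in-memory sketch (not part of the original module) showing how a concrete
# backend could satisfy AbstractDocStore. The class name, the module-level `_STORE`
# dict and the returned payloads are illustrative assumptions, not an existing backend.
_STORE: Dict[str, List] = {}


class InMemoryDocStore(AbstractDocStore):
    @staticmethod
    def list(namespace: str, show_table: bool) -> List[str]:
        # every pushed name under the namespace counts as one stored DocArray
        return [name for name in _STORE if name.startswith(namespace)]

    @staticmethod
    def delete(name: str, missing_ok: bool) -> bool:
        if name in _STORE:
            del _STORE[name]
            return True
        if not missing_ok:
            raise ValueError(f'no DocArray named {name!r}')
        return False

    @staticmethod
    def push(da, name, public, show_progress, branding=None) -> Dict:
        _STORE[name] = list(da)
        return {'name': name, 'count': len(_STORE[name])}

    @staticmethod
    def push_stream(docs, url, public=True, show_progress=False, branding=None) -> Dict:
        _STORE[url] = list(docs)
        return {'name': url, 'count': len(_STORE[url])}

    @staticmethod
    def pull(da_cls, name, show_progress, local_cache):
        return da_cls(_STORE[name])

    @staticmethod
    def pull_stream(da_cls, name, show_progress, local_cache):
        yield from _STORE[name]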
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDocument, DocumentArray
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocumentArrays in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocumentArray names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocumentArray object at the specified name
:param name: The name of the DocumentArray to delete
:param missing_ok: If true, no error will be raised if the DocumentArray does not exist.
:return: True if the DocumentArray was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
da: 'DocumentArray',
name: str,
public: bool,
show_progress: bool,
branding: Optional[Dict],
) -> Dict:
"""Push this DocumentArray to the specified name.
:param da: The DocumentArray to push
:param name: The name to push to
:param public: Whether the DocumentArray should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocumentArray
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDocument'],
url: str,
public: bool = True,
show_progress: bool = False,
branding: Optional[Dict] = None,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param public: Whether the DocumentArray should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocumentArray
"""
...
@staticmethod
@abstractmethod
def pull(
da_cls: Type['DocumentArray'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocumentArray':
"""Pull a DocumentArray from the specified name.
:param da_cls: The DocumentArray class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocumentArray will be cached locally
:return: A DocumentArray
"""
...
@staticmethod
@abstractmethod
def pull_stream(
da_cls: Type['DocumentArray'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDocument']:
"""Pull a stream of documents from the specified name.
:param da_cls: The DocumentArray class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocumentArray will be cached locally
:return: An iterator of documents"""
...
|
import tempfile
from collections.abc import Generator
from typing import cast
import pytest
from langchain_core.documents import Document
from langchain.storage._lc_store import create_kv_docstore, create_lc_store
from langchain.storage.file_system import LocalFileStore
@pytest.fixture
def file_store() -> Generator[LocalFileStore, None, None]:
# Create a temporary directory for testing
with tempfile.TemporaryDirectory() as temp_dir:
# Instantiate the LocalFileStore with the temporary directory as the root path
store = LocalFileStore(temp_dir)
yield store
def test_create_lc_store(file_store: LocalFileStore) -> None:
"""Test that a docstore is created from a base store."""
docstore = create_lc_store(file_store)
docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))])
fetched_doc = cast(Document, docstore.mget(["key1"])[0])
assert fetched_doc.page_content == "hello"
assert fetched_doc.metadata == {"key": "value"}
def test_create_kv_store(file_store: LocalFileStore) -> None:
"""Test that a docstore is created from a base store."""
docstore = create_kv_docstore(file_store)
docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))])
fetched_doc = docstore.mget(["key1"])[0]
assert isinstance(fetched_doc, Document)
assert fetched_doc.page_content == "hello"
assert fetched_doc.metadata == {"key": "value"}
|
import tempfile
from typing import Generator, cast
import pytest
from langchain_core.documents import Document
from langchain.storage._lc_store import create_kv_docstore, create_lc_store
from langchain.storage.file_system import LocalFileStore
@pytest.fixture
def file_store() -> Generator[LocalFileStore, None, None]:
# Create a temporary directory for testing
with tempfile.TemporaryDirectory() as temp_dir:
# Instantiate the LocalFileStore with the temporary directory as the root path
store = LocalFileStore(temp_dir)
yield store
def test_create_lc_store(file_store: LocalFileStore) -> None:
"""Test that a docstore is created from a base store."""
docstore = create_lc_store(file_store)
docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))])
fetched_doc = cast(Document, docstore.mget(["key1"])[0])
assert fetched_doc.page_content == "hello"
assert fetched_doc.metadata == {"key": "value"}
def test_create_kv_store(file_store: LocalFileStore) -> None:
"""Test that a docstore is created from a base store."""
docstore = create_kv_docstore(file_store)
docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))])
fetched_doc = docstore.mget(["key1"])[0]
assert isinstance(fetched_doc, Document)
assert fetched_doc.page_content == "hello"
assert fetched_doc.metadata == {"key": "value"}
|
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_sample1e-3-ms-2x_lvis-v1.py'
model = dict(
roi_head=dict(
mask_head=dict(
predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
|
_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
roi_head=dict(
mask_head=dict(
predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
|
_base_ = ['co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py']
model = dict(backbone=dict(drop_path_rate=0.5))
param_scheduler = [dict(type='MultiStepLR', milestones=[30])]
train_cfg = dict(max_epochs=36)
|
_base_ = ['co_dino_5scale_swin_l_lsj_16xb1_1x_coco.py']
model = dict(backbone=dict(drop_path_rate=0.5))
param_scheduler = [dict(milestones=[30])]
train_cfg = dict(max_epochs=36)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.9.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor and micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
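

if __name__ == '__main__':
    # Quick illustrative checks for parse_version_info; the version strings below are
    # made up for demonstration and are not releases referenced by this file.
    assert parse_version_info('0.9.1') == (0, 9, 1)
    assert parse_version_info('1.0.0rc2') == (1, 0, 0, 'rc2')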
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.9.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor and micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_grayscale, to_tensor # usort: skip
|
# TODO: Add _log_api_usage_once() in all mid-level kernels. If they remain not jit-scriptable we can use decorators
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_dtype_image_tensor,
convert_dtype,
convert_dtype_video,
convert_image_dtype,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, rgb_to_grayscale, to_grayscale, to_tensor # usort: skip
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
from docarray.array.milvus import DocumentArrayMilvus, MilvusConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=5)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=5)),
(DocumentArrayQdrant, QdrantConfig(n_dim=5)),
(DocumentArrayElastic, ElasticConfig(n_dim=5)),
(DocumentArrayRedis, RedisConfig(n_dim=5)),
(DocumentArrayMilvus, MilvusConfig(n_dim=5)),
],
)
def test_empty_non_zero(da_cls, config, start_storage):
# Assert .empty provides a da with 0 docs
if config:
da = da_cls.empty(config=config)
else:
da = da_cls.empty()
assert len(da) == 0
# Assert .empty provides a da of the correct length
if config:
da = da_cls.empty(10, config=config)
else:
da = da_cls.empty(10)
assert len(da) == 10
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=5)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=5)),
(DocumentArrayQdrant, QdrantConfig(n_dim=5)),
(DocumentArrayElastic, ElasticConfig(n_dim=5)),
(DocumentArrayRedis, RedisConfig(n_dim=5)),
],
)
def test_empty_non_zero(da_cls, config, start_storage):
# Assert .empty provides a da with 0 docs
if config:
da = da_cls.empty(config=config)
else:
da = da_cls.empty()
assert len(da) == 0
# Assert .empty provides a da of the correct length
if config:
da = da_cls.empty(10, config=config)
else:
da = da_cls.empty(10)
assert len(da) == 10
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._type_conversion import DecodeImage, LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertImageDtype
from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, RemoveSmallBoundingBoxes, ToDtype
from ._type_conversion import DecodeImage, LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py']
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(optimizer=dict(weight_decay=0.00005))
|
_base_ = [
'../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(optimizer=dict(weight_decay=0.00005))
|
import os
from typing import Dict
from hubble.executor.helper import parse_hub_uri
from hubble.executor.hubio import HubIO
from jina import __default_executor__, __version__
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
    except Exception:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
    Convert a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'upload_files',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
    :return: boolean indicating whether `uses` is valid for use in K8s or docker compose
"""
if uses == __default_executor__ or uses.startswith('docker://'):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
    except ValueError:
        return False
    return False
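

if __name__ == '__main__':
    # Illustrative usage of the helpers above; the executor names and image URIs are
    # assumptions made up for demonstration, not values used elsewhere in this module.
    print(to_compatible_name('My_Executor/v1'))  # -> 'my-executor-v1'
    print(validate_uses('docker://jinaai/my-image:1'))  # -> True, docker images pass as-is
    print(validate_uses('jinahub+docker://DummyExecutor'))  # -> True for hub container schemes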
|
import os
from typing import Dict
from jina import __default_executor__, __version__
from jina.enums import PodRoleType
from jina.hubble.helper import parse_hub_uri
from jina.hubble.hubio import HubIO
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
It uses the Hub api to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
    except Exception:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
    Convert a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'upload_files',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
    :return: boolean indicating whether `uses` is valid for use in K8s or docker compose
"""
if uses == __default_executor__ or uses.startswith('docker://'):
return True
try:
scheme, _, _, _ = parse_hub_uri(uses)
if scheme in {'jinahub+docker', 'jinahub+sandbox'}:
return True
    except ValueError:
        return False
    return False
|
import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_generate_pod_args,
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
from tests.unit.yaml.dummy_gateway_get_streamer import DummyGatewayGetStreamer
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
_dummy_fastapi_gateway_yaml_path = os.path.join(
cur_dir, '../../yaml/test-fastapi-gateway.yml'
)
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = _generate_pod_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
'DummyGatewayGetStreamer',
{},
{'arg1': None, 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGatewayGetStreamer',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
'DummyGatewayGetStreamer',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import json
import multiprocessing
import os
import time
import pytest
from jina.helper import random_port
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import (
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
from tests.unit.yaml.dummy_gateway_get_streamer import DummyGatewayGetStreamer
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(cur_dir, '../../yaml/test-custom-gateway.yml')
_dummy_fastapi_gateway_yaml_path = os.path.join(
cur_dir, '../../yaml/test-fastapi-gateway.yml'
)
def _create_gateway_runtime(port, uses, uses_with, worker_port):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
deployments_metadata = '{"pod0": {"key1": "value1", "key2": "value2"}}'
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--port',
str(port),
'--uses',
uses,
'--uses-with',
json.dumps(uses_with),
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--deployments-metadata',
deployments_metadata,
]
)
) as runtime:
runtime.run_forever()
def _start_gateway_runtime(uses, uses_with, worker_port):
port = random_port()
p = multiprocessing.Process(
target=_create_gateway_runtime,
args=(port, uses, uses_with, worker_port),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
def _create_worker_runtime(port, uses):
args = set_pod_parser().parse_args(['--uses', uses, '--port', str(port)])
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _start_worker_runtime(uses):
port = random_port()
p = multiprocessing.Process(
target=_create_worker_runtime,
args=(port, uses),
daemon=True,
)
p.start()
time.sleep(1)
return port, p
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
('DummyGateway', {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
'DummyGatewayGetStreamer',
{},
{'arg1': None, 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGatewayGetStreamer',
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
'DummyGateway',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
'DummyGatewayGetStreamer',
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_custom_gateway_no_executors(uses, uses_with, expected):
worker_port, worker_process = _start_worker_runtime('ProcessExecutor')
gateway_port, gateway_process = _start_gateway_runtime(uses, uses_with, worker_port)
_validate_dummy_custom_gateway_response(gateway_port, expected)
_validate_custom_gateway_process(
gateway_port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
gateway_process.terminate()
gateway_process.join()
worker_process.terminate()
worker_process.join()
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
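

# Illustrative config snippet (assumed values following typical mmdet AutoAssign
# configs, not taken from this repository) showing how the constructor arguments
# above are usually filled in:
#
#   model = dict(
#       type='AutoAssign',
#       backbone=dict(type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3)),
#       neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256),
#       bbox_head=dict(type='AutoAssignHead', num_classes=80, in_channels=256))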
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
from typing import Optional
from mmengine.fileio import dump
from . import root
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Default: True
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print(f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ")
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Defaults to True.
Returns:
dict: Statistic results of all registered modules.
"""
# import modules to trigger registering
import mmengine.dataset
import mmengine.evaluator
import mmengine.hooks
import mmengine.model
import mmengine.optim
import mmengine.runner
import mmengine.visualization # noqa: F401
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print('Finish registry analysis, got: ', scan_data)
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print(f'Result has been saved to {json_path}')
return scan_data
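

if __name__ == '__main__':
    # Illustrative invocation; the save path '.' is an assumption for demonstration.
    # Scans the registries of the installed MMEngine and dumps the statistics as JSON.
    stats = count_registered_modules(save_path='.', verbose=False)
    print(f"scanned {len(stats['registries'])} root registries")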
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
from typing import Optional
from mmengine.fileio import dump
from . import root
from .registry import Registry
def traverse_registry_tree(registry: Registry, verbose: bool = True) -> list:
"""Traverse the whole registry tree from any given node, and collect
information of all registered modules in this registry tree.
Args:
registry (Registry): a registry node in the registry tree.
verbose (bool): Whether to print log. Default: True
Returns:
list: Statistic results of all modules in each node of the registry
tree.
"""
root_registry = registry.root
modules_info = []
def _dfs_registry(_registry):
if isinstance(_registry, Registry):
num_modules = len(_registry.module_dict)
scope = _registry.scope
registry_info = dict(num_modules=num_modules, scope=scope)
for name, registered_class in _registry.module_dict.items():
folder = '/'.join(registered_class.__module__.split('.')[:-1])
if folder in registry_info:
registry_info[folder].append(name)
else:
registry_info[folder] = [name]
if verbose:
print(f"Find {num_modules} modules in {scope}'s "
f"'{_registry.name}' registry ")
modules_info.append(registry_info)
else:
return
for _, child in _registry.children.items():
_dfs_registry(child)
_dfs_registry(root_registry)
return modules_info
def count_registered_modules(save_path: Optional[str] = None,
verbose: bool = True) -> dict:
"""Scan all modules in MMEngine's root and child registries and dump to
json.
Args:
save_path (str, optional): Path to save the json file.
verbose (bool): Whether to print log. Default: True
Returns:
dict: Statistic results of all registered modules.
"""
registries_info = {}
# traverse all registries in MMEngine
for item in dir(root):
if not item.startswith('__'):
registry = getattr(root, item)
if isinstance(registry, Registry):
registries_info[item] = traverse_registry_tree(
registry, verbose)
scan_data = dict(
scan_date=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
registries=registries_info)
if verbose:
print('Finish registry analysis, got: ', scan_data)
if save_path is not None:
json_path = osp.join(save_path, 'modules_statistic_results.json')
dump(scan_data, json_path, indent=2)
print(f'Result has been saved to {json_path}')
return scan_data
|
from typing import List, cast
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.core.schema import (
Document,
NodeRelationship,
QueryBundle,
RelatedNodeInfo,
TextNode,
ImageNode,
)
from llama_index.core.vector_stores.simple import SimpleVectorStore
def test_simple_query(
documents: List[Document],
patch_llm_predictor,
patch_token_text_splitter,
mock_embed_model,
) -> None:
"""Test embedding query."""
index = VectorStoreIndex.from_documents(documents, embed_model=mock_embed_model)
# test embedding query
query_str = "What is?"
retriever = index.as_retriever(similarity_top_k=1)
nodes = retriever.retrieve(QueryBundle(query_str))
assert len(nodes) == 1
assert nodes[0].node.get_content() == "This is another test."
def test_query_and_similarity_scores(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test that sources nodes have similarity scores."""
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
document = Document(text=doc_text)
index = VectorStoreIndex.from_documents([document])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
nodes = retriever.retrieve(QueryBundle(query_str))
assert len(nodes) > 0
assert nodes[0].score is not None
def test_simple_check_ids(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test build VectorStoreIndex."""
ref_doc_id = "ref_doc_id_test"
source_rel = {NodeRelationship.SOURCE: RelatedNodeInfo(node_id=ref_doc_id)}
all_nodes = [
TextNode(text="Hello world.", id_="node1", relationships=source_rel),
TextNode(text="This is a test.", id_="node2", relationships=source_rel),
TextNode(text="This is another test.", id_="node3", relationships=source_rel),
TextNode(text="This is a test v2.", id_="node4", relationships=source_rel),
]
index = VectorStoreIndex(all_nodes)
# test query
query_str = "What is?"
retriever = index.as_retriever()
nodes = retriever.retrieve(QueryBundle(query_str))
assert nodes[0].node.get_content() == "This is another test."
assert nodes[0].node.ref_doc_id == "ref_doc_id_test"
assert nodes[0].node.node_id == "node3"
vector_store = cast(SimpleVectorStore, index._vector_store)
assert "node3" in vector_store._data.embedding_dict
assert "node3" in vector_store._data.text_id_to_ref_doc_id
def test_query(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test embedding query."""
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
document = Document(text=doc_text)
index = VectorStoreIndex.from_documents([document])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
_ = retriever.retrieve(QueryBundle(query_str))
def test_query_image_node() -> None:
"""Test embedding query."""
image_node = ImageNode(image="potato")
index = VectorStoreIndex.from_documents([])
index.insert_nodes([image_node])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
results = retriever.retrieve(QueryBundle(query_str))
assert len(results) == 1
assert results[0].node.node_id == image_node.node_id
assert isinstance(results[0].node, ImageNode)
assert results[0].node.image == "potato"
|
from typing import List, cast
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.core.schema import (
Document,
NodeRelationship,
QueryBundle,
RelatedNodeInfo,
TextNode,
)
from llama_index.core.vector_stores.simple import SimpleVectorStore
def test_simple_query(
documents: List[Document],
patch_llm_predictor,
patch_token_text_splitter,
mock_embed_model,
) -> None:
"""Test embedding query."""
index = VectorStoreIndex.from_documents(documents, embed_model=mock_embed_model)
# test embedding query
query_str = "What is?"
retriever = index.as_retriever(similarity_top_k=1)
nodes = retriever.retrieve(QueryBundle(query_str))
assert len(nodes) == 1
assert nodes[0].node.get_content() == "This is another test."
def test_query_and_similarity_scores(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test that sources nodes have similarity scores."""
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
document = Document(text=doc_text)
index = VectorStoreIndex.from_documents([document])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
nodes = retriever.retrieve(QueryBundle(query_str))
assert len(nodes) > 0
assert nodes[0].score is not None
def test_simple_check_ids(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test build VectorStoreIndex."""
ref_doc_id = "ref_doc_id_test"
source_rel = {NodeRelationship.SOURCE: RelatedNodeInfo(node_id=ref_doc_id)}
all_nodes = [
TextNode(text="Hello world.", id_="node1", relationships=source_rel),
TextNode(text="This is a test.", id_="node2", relationships=source_rel),
TextNode(text="This is another test.", id_="node3", relationships=source_rel),
TextNode(text="This is a test v2.", id_="node4", relationships=source_rel),
]
index = VectorStoreIndex(all_nodes)
# test query
query_str = "What is?"
retriever = index.as_retriever()
nodes = retriever.retrieve(QueryBundle(query_str))
assert nodes[0].node.get_content() == "This is another test."
assert nodes[0].node.ref_doc_id == "ref_doc_id_test"
assert nodes[0].node.node_id == "node3"
vector_store = cast(SimpleVectorStore, index._vector_store)
assert "node3" in vector_store._data.embedding_dict
assert "node3" in vector_store._data.text_id_to_ref_doc_id
def test_query(
patch_llm_predictor,
patch_token_text_splitter,
) -> None:
"""Test embedding query."""
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
document = Document(text=doc_text)
index = VectorStoreIndex.from_documents([document])
# test embedding query
query_str = "What is?"
retriever = index.as_retriever()
_ = retriever.retrieve(QueryBundle(query_str))
|
_base_ = 'cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_1.6gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
neck=dict(
type='FPN',
in_channels=[72, 168, 408, 912],
out_channels=256,
num_outs=5))
|
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_1.6gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
neck=dict(
type='FPN',
in_channels=[72, 168, 408, 912],
out_channels=256,
num_outs=5))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
import torch.nn as nn
from mmengine.runner import autocast
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
class TestAmp(unittest.TestCase):
def test_autocast(self):
if not torch.cuda.is_available():
if digit_version(TORCH_VERSION) < digit_version('1.10.0'):
                # `torch.cuda.amp.autocast` is only supported in GPU mode; if
                # CUDA is not available, it returns an empty context and
                # should not accept any arguments.
with self.assertRaisesRegex(RuntimeError,
'If pytorch versions is '):
with autocast():
pass
with autocast(enabled=False):
layer = nn.Conv2d(1, 1, 1)
res = layer(torch.randn(1, 1, 1, 1))
self.assertEqual(res.dtype, torch.float32)
else:
with autocast(device_type='cpu'):
# torch.autocast support cpu mode.
layer = nn.Conv2d(1, 1, 1)
res = layer(torch.randn(1, 1, 1, 1))
self.assertIn(res.dtype, (torch.bfloat16, torch.float16))
with autocast(enabled=False):
res = layer(torch.randn(1, 1, 1, 1))
self.assertEqual(res.dtype, torch.float32)
else:
if digit_version(TORCH_VERSION) < digit_version('1.10.0'):
devices = ['cuda']
else:
devices = ['cpu', 'cuda']
for device in devices:
with autocast(device_type=device):
# torch.autocast support cpu and cuda mode.
layer = nn.Conv2d(1, 1, 1).to(device)
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertIn(res.dtype, (torch.bfloat16, torch.float16))
with autocast(enabled=False, device_type=device):
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertEqual(res.dtype, torch.float32)
# Test with fp32_enabled
with autocast(enabled=False, device_type=device):
layer = nn.Conv2d(1, 1, 1).to(device)
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertEqual(res.dtype, torch.float32)
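

# Minimal standalone sketch of the behaviour exercised above (assumptions: a CPU-only
# machine and torch >= 1.10, so `autocast(device_type='cpu')` is available): inside the
# context the conv output is cast to a low-precision dtype, outside it stays float32.
if __name__ == '__main__':
    layer = nn.Conv2d(1, 1, 1)
    with autocast(device_type='cpu'):
        print(layer(torch.randn(1, 1, 1, 1)).dtype)  # torch.bfloat16 (or torch.float16)
    print(layer(torch.randn(1, 1, 1, 1)).dtype)  # torch.float32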
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
import torch.nn as nn
from mmengine.runner import autocast
from mmengine.utils import TORCH_VERSION, digit_version
class TestAmp(unittest.TestCase):
def test_autocast(self):
if not torch.cuda.is_available():
if digit_version(TORCH_VERSION) < digit_version('1.10.0'):
                # `torch.cuda.amp.autocast` is only supported in GPU mode; if
                # CUDA is not available, it returns an empty context and
                # should not accept any arguments.
with self.assertRaisesRegex(RuntimeError,
'If pytorch versions is '):
with autocast():
pass
with autocast(enabled=False):
layer = nn.Conv2d(1, 1, 1)
res = layer(torch.randn(1, 1, 1, 1))
self.assertEqual(res.dtype, torch.float32)
else:
with autocast(device_type='cpu'):
# torch.autocast support cpu mode.
layer = nn.Conv2d(1, 1, 1)
res = layer(torch.randn(1, 1, 1, 1))
self.assertIn(res.dtype, (torch.bfloat16, torch.float16))
with autocast(enabled=False):
res = layer(torch.randn(1, 1, 1, 1))
self.assertEqual(res.dtype, torch.float32)
else:
if digit_version(TORCH_VERSION) < digit_version('1.10.0'):
devices = ['cuda']
else:
devices = ['cpu', 'cuda']
for device in devices:
with autocast(device_type=device):
                # torch.autocast supports CPU and CUDA modes.
layer = nn.Conv2d(1, 1, 1).to(device)
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertIn(res.dtype, (torch.bfloat16, torch.float16))
with autocast(enabled=False, device_type=device):
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertEqual(res.dtype, torch.float32)
# Test with fp32_enabled
with autocast(enabled=False, device_type=device):
layer = nn.Conv2d(1, 1, 1).to(device)
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertEqual(res.dtype, torch.float32)
|
import numpy as np
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.typing import Tensor
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor._flush_tensor_to_proto(nd_proto, value=original_tensor)
nested_item = NodeProto(tensor=nd_proto)
tensor = Tensor._read_from_proto(nested_item.tensor)
assert (tensor == original_tensor).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor._flush_tensor_to_proto(nd_proto, value=original_tensor)
nested_item2 = NodeProto(tensor=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
import numpy as np
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.typing import Tensor
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor.flush_ndarray(nd_proto, value=original_tensor)
nested_item = NodeProto(tensor=nd_proto)
tensor = Tensor.read_ndarray(nested_item.tensor)
assert (tensor == original_tensor).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor.flush_ndarray(nd_proto, value=original_tensor)
nested_item2 = NodeProto(tensor=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
import time
import pytest
from jina import Executor, Flow
SLOW_EXECUTOR_SLEEP_TIME = 3
class SlowExecutor(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
time.sleep(SLOW_EXECUTOR_SLEEP_TIME)
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_gateway_warmup_fast_executor(protocol, capfd):
flow = Flow(protocol=protocol).add()
with flow:
time.sleep(1)
out, _ = capfd.readouterr()
assert 'recv _status' in out
assert out.count('recv _status') == 1
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_gateway_warmup_with_replicas_and_shards(protocol, capfd):
flow = (
Flow(protocol=protocol)
.add(name='executor0', shards=2)
.add(name='executor1', replicas=2)
)
with flow:
time.sleep(1)
out, _ = capfd.readouterr()
assert 'recv _status' in out
# 2 calls from gateway runtime to deployments
# 2 calls from head to shards
# 1 call from the gateway to the head runtime warmup adds an additional call to any shard
assert out.count('recv _status') == 5
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_gateway_warmup_slow_executor(protocol, capfd):
flow = Flow(protocol=protocol).add(name='slowExecutor', uses='SlowExecutor')
with flow:
        # requires a high sleep time to account for Flow readiness and to properly capture the output logs
time.sleep(SLOW_EXECUTOR_SLEEP_TIME * 3)
out, _ = capfd.readouterr()
assert 'recv _status' in out
assert out.count('recv _status') == 1
@pytest.mark.asyncio
async def test_multi_protocol_gateway_warmup_fast_executor(port_generator, capfd):
http_port = port_generator()
grpc_port = port_generator()
websocket_port = port_generator()
flow = (
Flow()
.config_gateway(
port=[http_port, grpc_port, websocket_port],
protocol=['http', 'grpc', 'websocket'],
)
.add()
)
with flow:
time.sleep(1)
out, _ = capfd.readouterr()
assert 'recv _status' in out
assert out.count('recv _status') == 1
@pytest.mark.asyncio
async def test_multi_protocol_gateway_warmup_slow_executor(port_generator, capfd):
http_port = port_generator()
grpc_port = port_generator()
websocket_port = port_generator()
flow = (
Flow()
.config_gateway(
port=[http_port, grpc_port, websocket_port],
protocol=['http', 'grpc', 'websocket'],
)
.add(name='slowExecutor', uses='SlowExecutor')
)
with flow:
        # requires a high sleep time to account for Flow readiness and to properly capture the output logs
time.sleep(SLOW_EXECUTOR_SLEEP_TIME * 3)
out, _ = capfd.readouterr()
assert 'recv _status' in out
assert out.count('recv _status') == 1
|
import threading
import time
import pytest
from jina import Executor, Flow
SLOW_EXECUTOR_SLEEP_TIME = 3
class SlowExecutor(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
time.sleep(SLOW_EXECUTOR_SLEEP_TIME)
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_gateway_warmup_fast_executor(protocol, capfd):
flow = Flow(protocol=protocol).add()
with flow:
time.sleep(1)
out, _ = capfd.readouterr()
assert 'recv _status' in out
assert out.count('recv _status') == 1
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_gateway_warmup_with_replicas_and_shards(protocol, capfd):
flow = (
Flow(protocol=protocol)
.add(name='executor0', shards=2)
.add(name='executor1', replicas=2)
)
with flow:
time.sleep(1)
out, _ = capfd.readouterr()
assert 'recv _status' in out
# 2 calls from gateway runtime to deployments
# 2 calls from head to shards
# 1 call from the gateway to the head runtime warmup adds an additional call to any shard
assert out.count('recv _status') == 5
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_gateway_warmup_slow_executor(protocol, capfd):
flow = Flow(protocol=protocol).add(name='slowExecutor', uses='SlowExecutor')
with flow:
        # requires a high sleep time to account for Flow readiness and to properly capture the output logs
time.sleep(SLOW_EXECUTOR_SLEEP_TIME * 3)
out, _ = capfd.readouterr()
assert 'recv _status' in out
assert out.count('recv _status') == 1
@pytest.mark.asyncio
async def test_multi_protocol_gateway_warmup_fast_executor(port_generator, capfd):
http_port = port_generator()
grpc_port = port_generator()
websocket_port = port_generator()
flow = (
Flow()
.config_gateway(
port=[http_port, grpc_port, websocket_port],
protocol=['http', 'grpc', 'websocket'],
)
.add()
)
with flow:
time.sleep(1)
out, _ = capfd.readouterr()
assert 'recv _status' in out
assert out.count('recv _status') == 1
@pytest.mark.asyncio
async def test_multi_protocol_gateway_warmup_slow_executor(port_generator, capfd):
http_port = port_generator()
grpc_port = port_generator()
websocket_port = port_generator()
flow = (
Flow()
.config_gateway(
port=[http_port, grpc_port, websocket_port],
protocol=['http', 'grpc', 'websocket'],
)
.add(name='slowExecutor', uses='SlowExecutor')
)
with flow:
        # requires a high sleep time to account for Flow readiness and to properly capture the output logs
time.sleep(SLOW_EXECUTOR_SLEEP_TIME * 3)
out, _ = capfd.readouterr()
assert 'recv _status' in out
assert out.count('recv _status') == 1
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_additional_imports = {}
_import_structure = {"pipeline_output": ["FluxPipelineOutput", "FluxPriorReduxPipelineOutput"]}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["modeling_flux"] = ["ReduxImageEncoder"]
_import_structure["pipeline_flux"] = ["FluxPipeline"]
_import_structure["pipeline_flux_control"] = ["FluxControlPipeline"]
_import_structure["pipeline_flux_control_img2img"] = ["FluxControlImg2ImgPipeline"]
_import_structure["pipeline_flux_controlnet"] = ["FluxControlNetPipeline"]
_import_structure["pipeline_flux_controlnet_image_to_image"] = ["FluxControlNetImg2ImgPipeline"]
_import_structure["pipeline_flux_controlnet_inpainting"] = ["FluxControlNetInpaintPipeline"]
_import_structure["pipeline_flux_fill"] = ["FluxFillPipeline"]
_import_structure["pipeline_flux_img2img"] = ["FluxImg2ImgPipeline"]
_import_structure["pipeline_flux_inpaint"] = ["FluxInpaintPipeline"]
_import_structure["pipeline_flux_prior_redux"] = ["FluxPriorReduxPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .modeling_flux import ReduxImageEncoder
from .pipeline_flux import FluxPipeline
from .pipeline_flux_control import FluxControlPipeline
from .pipeline_flux_control_img2img import FluxControlImg2ImgPipeline
from .pipeline_flux_controlnet import FluxControlNetPipeline
from .pipeline_flux_controlnet_image_to_image import FluxControlNetImg2ImgPipeline
from .pipeline_flux_controlnet_inpainting import FluxControlNetInpaintPipeline
from .pipeline_flux_fill import FluxFillPipeline
from .pipeline_flux_img2img import FluxImg2ImgPipeline
from .pipeline_flux_inpaint import FluxInpaintPipeline
from .pipeline_flux_prior_redux import FluxPriorReduxPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
for name, value in _additional_imports.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_additional_imports = {}
_import_structure = {"pipeline_output": ["FluxPipelineOutput"]}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_flux"] = ["FluxPipeline"]
_import_structure["pipeline_flux_controlnet"] = ["FluxControlNetPipeline"]
_import_structure["pipeline_flux_controlnet_image_to_image"] = ["FluxControlNetImg2ImgPipeline"]
_import_structure["pipeline_flux_controlnet_inpainting"] = ["FluxControlNetInpaintPipeline"]
_import_structure["pipeline_flux_img2img"] = ["FluxImg2ImgPipeline"]
_import_structure["pipeline_flux_inpaint"] = ["FluxInpaintPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_flux import FluxPipeline
from .pipeline_flux_controlnet import FluxControlNetPipeline
from .pipeline_flux_controlnet_image_to_image import FluxControlNetImg2ImgPipeline
from .pipeline_flux_controlnet_inpainting import FluxControlNetInpaintPipeline
from .pipeline_flux_img2img import FluxImg2ImgPipeline
from .pipeline_flux_inpaint import FluxInpaintPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
for name, value in _additional_imports.items():
setattr(sys.modules[__name__], name, value)
|
import importlib
from typing import Any
from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline
from langchain.retrievers.document_compressors.chain_extract import (
LLMChainExtractor,
)
from langchain.retrievers.document_compressors.chain_filter import (
LLMChainFilter,
)
from langchain.retrievers.document_compressors.cohere_rerank import CohereRerank
from langchain.retrievers.document_compressors.cross_encoder_rerank import (
CrossEncoderReranker,
)
from langchain.retrievers.document_compressors.embeddings_filter import (
EmbeddingsFilter,
)
from langchain.retrievers.document_compressors.listwise_rerank import (
LLMListwiseRerank,
)
_module_lookup = {
"FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
msg = f"module {__name__} has no attribute {name}"
raise AttributeError(msg)
__all__ = [
"CohereRerank",
"CrossEncoderReranker",
"DocumentCompressorPipeline",
"EmbeddingsFilter",
"FlashrankRerank",
"LLMChainExtractor",
"LLMChainFilter",
"LLMListwiseRerank",
]
|
import importlib
from typing import Any
from langchain.retrievers.document_compressors.base import DocumentCompressorPipeline
from langchain.retrievers.document_compressors.chain_extract import (
LLMChainExtractor,
)
from langchain.retrievers.document_compressors.chain_filter import (
LLMChainFilter,
)
from langchain.retrievers.document_compressors.cohere_rerank import CohereRerank
from langchain.retrievers.document_compressors.cross_encoder_rerank import (
CrossEncoderReranker,
)
from langchain.retrievers.document_compressors.embeddings_filter import (
EmbeddingsFilter,
)
from langchain.retrievers.document_compressors.listwise_rerank import (
LLMListwiseRerank,
)
_module_lookup = {
"FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
msg = f"module {__name__} has no attribute {name}"
raise AttributeError(msg)
__all__ = [
"DocumentCompressorPipeline",
"EmbeddingsFilter",
"FlashrankRerank",
"LLMListwiseRerank",
"LLMChainExtractor",
"LLMChainFilter",
"CohereRerank",
"CrossEncoderReranker",
]
|
"""
This basic example loads a pre-trained model from the web and uses it to
generate sentence embeddings for a given list of sentences.
"""
import logging
import numpy as np
from sentence_transformers import LoggingHandler, SentenceTransformer
#### Just some code to print debug information to stdout
np.set_printoptions(threshold=100)
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Load pre-trained Sentence Transformer Model. It will be downloaded automatically
model = SentenceTransformer("all-MiniLM-L6-v2")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
sentence_embeddings = model.encode(sentences)
# The result is a list of sentence embeddings as numpy arrays
for sentence, embedding in zip(sentences, sentence_embeddings):
print("Sentence:", sentence)
print("Embedding:", embedding)
print("")
|
"""
This basic example loads a pre-trained model from the web and uses it to
generate sentence embeddings for a given list of sentences.
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import numpy as np
import logging
#### Just some code to print debug information to stdout
np.set_printoptions(threshold=100)
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Load pre-trained Sentence Transformer Model. It will be downloaded automatically
model = SentenceTransformer("all-MiniLM-L6-v2")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
sentence_embeddings = model.encode(sentences)
# The result is a list of sentence embeddings as numpy arrays
for sentence, embedding in zip(sentences, sentence_embeddings):
print("Sentence:", sentence)
print("Embedding:", embedding)
print("")
|
#!/usr/bin/env python
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.convert_to_parquet import ConvertToParquetCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
ConvertToParquetCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
RunBeamCommand.register_subcommand(commands_parser)
DummyDataCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
|
from .clip_image import CLIPImageEncoder
|
from .clip_image import CLIPImageEncoder
|