input (string, 33–5k chars) | output (string, 32–5k chars)
---|---
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from sentence_transformers import SentenceTransformer
class TransformerSentenceEncoder(Executor):
"""
Encode the Document text into embedding.
"""
def __init__(
self,
model_name: str = 'all-MiniLM-L6-v2',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
"""
:param model_name: The name of the sentence transformer to be used
:param device: Torch device to put the model on (e.g. 'cpu', 'cuda', 'cuda:1')
:param traversal_paths: Default traversal paths
:param batch_size: Batch size to be used in the encoder model
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.model = SentenceTransformer(model_name, device=device)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the ``embedding`` attribute
of the docs.
:param docs: Documents to send to the encoder. They need to have the ``text``
attribute to get an embedding.
:param parameters: Any additional parameters for the `encode` function.
"""
for batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
needs_attr='text',
):
texts = batch.get_attributes('text')
with torch.no_grad():
embeddings = self.model.encode(texts)
for doc, embedding in zip(batch, embeddings):
doc.embedding = embedding
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from sentence_transformers import SentenceTransformer
class TransformerSentenceEncoder(Executor):
"""
Encode the Document text into embedding.
"""
def __init__(
self,
model_name: str = 'all-MiniLM-L6-v2',
device: str = 'cpu',
default_traversal_paths: Iterable[str] = ('r',),
default_batch_size: int = 32,
*args,
**kwargs
):
"""
:param model_name: The name of the sentence transformer to be used
:param device: Torch device to put the model on (e.g. 'cpu', 'cuda', 'cuda:1')
:param default_traversal_paths: Default traversal paths
:param default_batch_size: Batch size to be used in the encoder model
"""
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.model = SentenceTransformer(model_name, device=device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the ``embedding`` attribute
of the docs.
:param docs: Documents to send to the encoder. They need to have the ``text``
attribute to get an embedding.
:param parameters: Any additional parameters for the `encode` function.
"""
for batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
texts = batch.get_attributes('text')
with torch.no_grad():
embeddings = self.model.encode(texts)
for doc, embedding in zip(batch, embeddings):
doc.embedding = embedding
|
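# Hedged usage sketch (not part of the original file) for the
# TransformerSentenceEncoder defined above. It assumes jina 2.x-style Documents
# with a ``text`` attribute and the default 'all-MiniLM-L6-v2' model; request-time
# ``parameters`` override the defaults set in __init__.
from jina import Document, DocumentArray

encoder = TransformerSentenceEncoder(model_name='all-MiniLM-L6-v2', device='cpu')
docs = DocumentArray(
    [
        Document(text='How do I learn Python?'),
        Document(text='What is the best way to study Python?'),
    ]
)
# Override the default batch size for this call only.
encoder.encode(docs, parameters={'batch_size': 2})
for doc in docs:
    print(doc.text, doc.embedding.shape)  # all-MiniLM-L6-v2 produces 384-dim embeddings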
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
"""Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import ChatOpenAI
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "stream_usage": True}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
@property
def enable_vcr_tests(self) -> bool:
return True
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="o1-mini", stream_usage=True, temperature=1)
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
@property
def supports_pdf_inputs(self) -> bool:
# OpenAI requires a filename for PDF inputs
# For now, we test with filename in OpenAI-specific tests
return False
def test_openai_pdf_inputs(self, model: BaseChatModel) -> None:
"""Test that the model can process PDF inputs."""
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")
message = HumanMessage(
[
{"type": "text", "text": "Summarize this document:"},
{
"type": "file",
"source_type": "base64",
"mime_type": "application/pdf",
"data": pdf_data,
"filename": "my-pdf", # OpenAI requires a filename
},
]
)
_ = model.invoke([message])
# Test OpenAI Chat Completions format
message = HumanMessage(
[
{"type": "text", "text": "Summarize this document:"},
{
"type": "file",
"file": {
"filename": "test file.pdf",
"file_data": f"data:application/pdf;base64,{pdf_data}",
},
},
]
)
_ = model.invoke([message])
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
@pytest.mark.skip() # Test either finishes in 5 seconds or 5 minutes.
def test_audio_model() -> None:
class AudioModelTests(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatOpenAI]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"model": "gpt-4o-audio-preview",
"temperature": 0,
"model_kwargs": {
"modalities": ["text", "audio"],
"audio": {"voice": "alloy", "format": "wav"},
},
}
@property
def supports_audio_inputs(self) -> bool:
return True
test_instance = AudioModelTests()
model = test_instance.chat_model_class(**test_instance.chat_model_params)
AudioModelTests().test_audio_inputs(model)
|
"""Standard LangChain interface tests"""
import base64
from pathlib import Path
from typing import Literal, cast
import httpx
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import ChatOpenAI
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "stream_usage": True}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="o1-mini", stream_usage=True, temperature=1)
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
@property
def supports_pdf_inputs(self) -> bool:
# OpenAI requires a filename for PDF inputs
# For now, we test with filename in OpenAI-specific tests
return False
def test_openai_pdf_inputs(self, model: BaseChatModel) -> None:
"""Test that the model can process PDF inputs."""
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")
message = HumanMessage(
[
{"type": "text", "text": "Summarize this document:"},
{
"type": "file",
"source_type": "base64",
"mime_type": "application/pdf",
"data": pdf_data,
"filename": "my-pdf", # OpenAI requires a filename
},
]
)
_ = model.invoke([message])
# Test OpenAI Chat Completions format
message = HumanMessage(
[
{"type": "text", "text": "Summarize this document:"},
{
"type": "file",
"file": {
"filename": "test file.pdf",
"file_data": f"data:application/pdf;base64,{pdf_data}",
},
},
]
)
_ = model.invoke([message])
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
@pytest.mark.skip() # Test either finishes in 5 seconds or 5 minutes.
def test_audio_model() -> None:
class AudioModelTests(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatOpenAI]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"model": "gpt-4o-audio-preview",
"temperature": 0,
"model_kwargs": {
"modalities": ["text", "audio"],
"audio": {"voice": "alloy", "format": "wav"},
},
}
@property
def supports_audio_inputs(self) -> bool:
return True
test_instance = AudioModelTests()
model = test_instance.chat_model_class(**test_instance.chat_model_params)
AudioModelTests().test_audio_inputs(model)
|
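# Hedged sketch (not from the original test file) of the chunk-folding pattern
# used by the ``_invoke`` helper above: streamed AIMessageChunk objects support
# ``+``, which concatenates content and merges usage metadata, so the final value
# behaves like a single AIMessage. Assumes langchain-openai is installed and
# OPENAI_API_KEY is set.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
full = None
for chunk in llm.stream("Say hello in three languages"):
    full = full + chunk if full else chunk
print(full.content)
print(full.usage_metadata)  # populated because stream_usage=True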
"""Test Azure OpenAI Chat API wrapper."""
import os
from unittest import mock
import pytest
from langchain_core.messages import HumanMessage
from typing_extensions import TypedDict
from langchain_openai import AzureChatOpenAI
def test_initialize_azure_openai() -> None:
llm = AzureChatOpenAI( # type: ignore[call-arg]
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
azure_endpoint="my-base-url",
)
assert llm.deployment_name == "35-turbo-dev"
assert llm.openai_api_version == "2023-05-15"
assert llm.azure_endpoint == "my-base-url"
def test_initialize_more() -> None:
llm = AzureChatOpenAI( # type: ignore[call-arg]
api_key="xyz", # type: ignore[arg-type]
azure_endpoint="my-base-url",
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
temperature=0,
model="gpt-35-turbo",
model_version="0125",
)
assert llm.openai_api_key is not None
assert llm.openai_api_key.get_secret_value() == "xyz"
assert llm.azure_endpoint == "my-base-url"
assert llm.deployment_name == "35-turbo-dev"
assert llm.openai_api_version == "2023-05-15"
assert llm.temperature == 0
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "azure"
assert ls_params["ls_model_name"] == "gpt-35-turbo-0125"
def test_initialize_azure_openai_with_openai_api_base_set() -> None:
with mock.patch.dict(os.environ, {"OPENAI_API_BASE": "https://api.openai.com"}):
llm = AzureChatOpenAI( # type: ignore[call-arg, call-arg]
api_key="xyz", # type: ignore[arg-type]
azure_endpoint="my-base-url",
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
temperature=0,
openai_api_base=None,
)
assert llm.openai_api_key is not None
assert llm.openai_api_key.get_secret_value() == "xyz"
assert llm.azure_endpoint == "my-base-url"
assert llm.deployment_name == "35-turbo-dev"
assert llm.openai_api_version == "2023-05-15"
assert llm.temperature == 0
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "azure"
assert ls_params["ls_model_name"] == "35-turbo-dev"
def test_structured_output_old_model() -> None:
class Output(TypedDict):
"""output."""
foo: str
with pytest.warns(match="Cannot use method='json_schema'"):
llm = AzureChatOpenAI( # type: ignore[call-arg]
model="gpt-35-turbo",
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
azure_endpoint="my-base-url",
).with_structured_output(Output)
# assert tool calling was used instead of json_schema
assert "tools" in llm.steps[0].kwargs # type: ignore
assert "response_format" not in llm.steps[0].kwargs # type: ignore
def test_max_completion_tokens_in_payload() -> None:
llm = AzureChatOpenAI(
azure_deployment="o1-mini",
api_version="2024-12-01-preview",
azure_endpoint="my-base-url",
model_kwargs={"max_completion_tokens": 300},
)
messages = [HumanMessage("Hello")]
payload = llm._get_request_payload(messages)
assert payload == {
"messages": [{"content": "Hello", "role": "user"}],
"model": None,
"stream": False,
"max_completion_tokens": 300,
}
|
"""Test Azure OpenAI Chat API wrapper."""
import os
from unittest import mock
import pytest
from typing_extensions import TypedDict
from langchain_openai import AzureChatOpenAI
def test_initialize_azure_openai() -> None:
llm = AzureChatOpenAI( # type: ignore[call-arg]
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
azure_endpoint="my-base-url",
)
assert llm.deployment_name == "35-turbo-dev"
assert llm.openai_api_version == "2023-05-15"
assert llm.azure_endpoint == "my-base-url"
def test_initialize_more() -> None:
llm = AzureChatOpenAI( # type: ignore[call-arg]
api_key="xyz", # type: ignore[arg-type]
azure_endpoint="my-base-url",
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
temperature=0,
model="gpt-35-turbo",
model_version="0125",
)
assert llm.openai_api_key is not None
assert llm.openai_api_key.get_secret_value() == "xyz"
assert llm.azure_endpoint == "my-base-url"
assert llm.deployment_name == "35-turbo-dev"
assert llm.openai_api_version == "2023-05-15"
assert llm.temperature == 0
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "azure"
assert ls_params["ls_model_name"] == "gpt-35-turbo-0125"
def test_initialize_azure_openai_with_openai_api_base_set() -> None:
with mock.patch.dict(os.environ, {"OPENAI_API_BASE": "https://api.openai.com"}):
llm = AzureChatOpenAI( # type: ignore[call-arg, call-arg]
api_key="xyz", # type: ignore[arg-type]
azure_endpoint="my-base-url",
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
temperature=0,
openai_api_base=None,
)
assert llm.openai_api_key is not None
assert llm.openai_api_key.get_secret_value() == "xyz"
assert llm.azure_endpoint == "my-base-url"
assert llm.deployment_name == "35-turbo-dev"
assert llm.openai_api_version == "2023-05-15"
assert llm.temperature == 0
ls_params = llm._get_ls_params()
assert ls_params["ls_provider"] == "azure"
assert ls_params["ls_model_name"] == "35-turbo-dev"
def test_structured_output_old_model() -> None:
class Output(TypedDict):
"""output."""
foo: str
with pytest.warns(match="Cannot use method='json_schema'"):
llm = AzureChatOpenAI( # type: ignore[call-arg]
model="gpt-35-turbo",
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
azure_endpoint="my-base-url",
).with_structured_output(Output)
# assert tool calling was used instead of json_schema
assert "tools" in llm.steps[0].kwargs # type: ignore
assert "response_format" not in llm.steps[0].kwargs # type: ignore
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what counts as similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast: clustering 50k sentences takes only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
import csv
import os
import time
from sentence_transformers import SentenceTransformer, util
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer("all-MiniLM-L6-v2")
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
# Two parameters to tune:
# min_community_size: Only consider communities that have at least 25 elements
# threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
# Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i + 1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
"""
This is a more complex example of performing clustering on a large-scale dataset.
This example finds local communities in a large set of sentences, i.e., groups of sentences that are highly
similar. You can freely configure the threshold for what counts as similar. A high threshold will
only find extremely similar sentences; a lower threshold will find more sentences that are less similar.
A second parameter is 'min_community_size': only communities with at least a certain number of sentences will be returned.
The method for finding the communities is extremely fast: clustering 50k sentences takes only about 5 seconds (plus embedding computation).
In this example, we download a large set of questions from Quora and then find similar questions in this set.
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import time
# Model for computing sentence embeddings. We use one trained for similar questions detection
model = SentenceTransformer('all-MiniLM-L6-v2')
# We download the Quora Duplicate Questions Dataset (https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs)
# and find similar questions in it
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 50000 # We limit our corpus to only the first 50k questions
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row['question1'])
corpus_sentences.add(row['question2'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, batch_size=64, show_progress_bar=True, convert_to_tensor=True)
print("Start clustering")
start_time = time.time()
#Two parameters to tune:
#min_community_size: Only consider communities that have at least 25 elements
#threshold: Consider sentence pairs with a cosine-similarity larger than threshold as similar
clusters = util.community_detection(corpus_embeddings, min_community_size=25, threshold=0.75)
print("Clustering done after {:.2f} sec".format(time.time() - start_time))
#Print for all clusters the top 3 and bottom 3 elements
for i, cluster in enumerate(clusters):
print("\nCluster {}, #{} Elements ".format(i+1, len(cluster)))
for sentence_id in cluster[0:3]:
print("\t", corpus_sentences[sentence_id])
print("\t", "...")
for sentence_id in cluster[-3:]:
print("\t", corpus_sentences[sentence_id])
|
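# Hedged toy illustration (not part of the original example) of
# util.community_detection as used above: a handful of sentences instead of the
# 50k Quora questions, a lower min_community_size so the tiny corpus can form
# communities at all, and a slightly looser similarity threshold.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")
sentences = [
    "How do I learn Python quickly?",
    "What is the fastest way to learn Python?",
    "Best resources for learning Python?",
    "How do I bake sourdough bread?",
    "What is a good sourdough starter recipe?",
]
embeddings = model.encode(sentences, convert_to_tensor=True)
# min_community_size=2: keep any group of at least 2 similar sentences;
# threshold=0.6: looser than the 0.75 used above, so weaker matches also count.
clusters = util.community_detection(embeddings, min_community_size=2, threshold=0.6)
for i, cluster in enumerate(clusters):
    print(f"Community {i + 1}:", [sentences[idx] for idx in cluster])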
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
default_language: str = 'en',
cpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs,
):
"""
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE.``
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
:param default_language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param cpu: if True, forces the use of the CPU even when a GPU is available.
:param default_batch_size: size of each batch
:param default_traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
"""
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.default_language = default_language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': cpu},
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_paths``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
language = parameters.get('language', self.default_language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Iterable, Optional
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
from laserembeddings import Laser
class LaserEncoder(Executor):
"""
LaserEncoder is a text encoder based on Facebook Research's LASER encoder.
This encoder is suitable for producing multi-lingual sentence embeddings, enabling
you to have sentences from multiple languages in the same latent space.
:param path_to_bpe_codes: path to bpe codes from Laser. Defaults to
``Laser.DEFAULT_BPE_CODES_FILE.``
:param path_to_bpe_vocab: path to bpe vocabs from Laser. Defaults to
``Laser.DEFAULT_BPE_VOCAB_FILE``.
:param path_to_encoder: path to the encoder from Laser. Defaults to
``Laser.DEFAULT_ENCODER_FILE``.
:param download_data: Whether data should be downloaded on initialization. This is
convenient when just trying out the encoder, but should be turned off in a
production setting (where you should already have the data on disk), as it can
lead to large startup times.
:param default_language: The default language of the text. Can be overridden by a
request parameter. The full list of possible values can be found at
[LASER](https://github.com/facebookresearch/LASER#supported-languages)
with the language code
([ISO 639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes))
:param cpu: if True, forces the use of the CPU even when a GPU is available.
:param default_batch_size: size of each batch
:param default_traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param args: Additional positional arguments
:param kwargs: Additional keyword arguments
"""
def __init__(
self,
path_to_bpe_codes: Optional[str] = None,
path_to_bpe_vocab: Optional[str] = None,
path_to_encoder: Optional[str] = None,
download_data: bool = True,
default_language: str = 'en',
cpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Iterable[str] = ('r',),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.logger = JinaLogger(self.__class__.__name__)
self._path_to_bpe_codes = path_to_bpe_codes
self._path_to_bpe_vocab = path_to_bpe_vocab
self._path_to_encoder = path_to_encoder
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.default_language = default_language
if download_data:
self.logger.info("Downloading data for the Laser model")
subprocess.run(
['python', '-m', 'laserembeddings', 'download-models'], check=True
)
self.model = Laser(
bpe_codes=self._path_to_bpe_codes,
bpe_vocab=self._path_to_bpe_vocab,
encoder=self._path_to_encoder,
embedding_options={'cpu': cpu},
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding attribute
of the docs.
:param docs: documents sent to the encoder. The docs must have the ``text``
attribute.
:param parameters: dictionary to define the ``traversal_paths``, the
``batch_size`` and ``language``. For example,
``{'traversal_paths': ['r'], 'batch_size': 10}``. This will override the
default parameters set at init.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
text_batch = [d.text for d in document_batch]
language = parameters.get('language', self.default_language)
embeddings = self.model.embed_sentences(text_batch, lang=language)
for document, embedding in zip(document_batch, embeddings):
document.embedding = embedding
|
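# Hedged sketch (not from the original file) of the request-time overrides that
# the encode() docstring above describes: keys in ``parameters`` fall back to the
# defaults set in __init__, so a single request can switch language or batch size.
from jina import Document, DocumentArray

encoder = LaserEncoder(download_data=True, default_language='en', cpu=True)
docs = DocumentArray(
    [
        Document(text='Wie spät ist es?'),
        Document(text='Wo ist der Bahnhof?'),
    ]
)
# Override the default language ('en') for this request only.
encoder.encode(docs, parameters={'language': 'de', 'batch_size': 2})
print(docs[0].embedding.shape)  # LASER sentence embeddings are 1024-dimensional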
import copy
from typing import Any, Dict, List, Tuple
_SPECIFIC_EXECUTOR_SEPARATOR = '__'
def _spit_key_and_executor_name(key_name: str) -> Tuple[str]:
"""Split a specific key into a key, name pair
ex: 'key__my_executor' will be split into 'key', 'my_executor'
:param key_name: key name of the param
:return: return the split 'key', 'executor_name' for the key_name
"""
key_split = key_name.split(_SPECIFIC_EXECUTOR_SEPARATOR)
new_key_name = key_split.pop(-1)
executor_name = ''.join(key_split)
return new_key_name, executor_name
def _get_name_from_replicas_name(name: str) -> Tuple[str]:
"""return the original name without the replicas
ex: 'exec1/rep-0' will be transformed into 'exec1'
:param name: name of the DataRequest
:return: return the original name without the replicas
"""
return name.split('/')[0]
def _is_param_for_specific_executor(key_name: str) -> bool:
"""Tell if a key is for a specific Executor
ex: 'key' is for every Executor whereas 'my_executor__key' is only for 'my_executor'
:param key_name: key name of the param
:return: return True if key_name is for specific Executor, False otherwise
"""
if _SPECIFIC_EXECUTOR_SEPARATOR in key_name:
if key_name.startswith(_SPECIFIC_EXECUTOR_SEPARATOR) or key_name.endswith(
_SPECIFIC_EXECUTOR_SEPARATOR
):
return False
return True
else:
return False
def _parse_specific_params(parameters: Dict, executor_name: str):
"""Parse the parameters dictionary to filter executor specific parameters
:param parameters: dictionary containing the parameters
:param executor_name: name of the Executor
:returns: the parsed parameters after applying filtering for the specific Executor
"""
parsed_params = copy.deepcopy(parameters)
for key in parameters:
if _is_param_for_specific_executor(key):
(
key_name,
key_executor_name,
) = _spit_key_and_executor_name(key)
if key_executor_name == executor_name:
parsed_params[key_name] = parameters[key]
del parsed_params[key]
specific_parameters = parameters.get(executor_name, None)
if specific_parameters:
parsed_params.update(**specific_parameters)
return parsed_params
_DEFAULT_GRPC_OPTION = {
'grpc.max_send_message_length': -1,
'grpc.max_receive_message_length': -1,
# for the following see this blog post for the choice of default value https://cs.mcgill.ca/~mxia3/2019/02/23/Using-gRPC-in-Production/
'grpc.keepalive_time_ms': 10000,
# send a keepalive ping every 10 seconds, default is 2 hours.
'grpc.keepalive_timeout_ms': 5000,
# keepalive ping time out after 5 seconds, default is 20 seconds
'grpc.keepalive_permit_without_calls': True,
# allow keepalive pings when there's no gRPC calls
'grpc.http2.max_pings_without_data': 0,
# allow unlimited amount of keepalive pings without data
'grpc.http2.min_time_between_pings_ms': 10000,
# allow grpc pings from client every 10 seconds
'grpc.http2.min_ping_interval_without_data_ms': 5000,
# allow grpc pings from client without data every 5 seconds
}
def _get_grpc_server_options(option_from_args: Dict) -> List[Tuple[str, Any]]:
"""transform dict of args into grpc option, will merge the args wit the default args
:param option_from_args: a dict of argument
:return: grpc option i.e a list of tuple of key value
"""
option_from_args = (
{**_DEFAULT_GRPC_OPTION, **option_from_args}
if option_from_args
else _DEFAULT_GRPC_OPTION
) # merge new and default args
return list(option_from_args.items())
|
import copy
from typing import Dict, Tuple
from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler
_SPECIFIC_EXECUTOR_SEPARATOR = '__'
def _spit_key_and_executor_name(key_name: str) -> Tuple[str]:
"""Split a specific key into a key, name pair
ex: 'key__my_executor' will be split into 'key', 'my_executor'
:param key_name: key name of the param
:return: return the split 'key', 'executor_name' for the key_name
"""
key_split = key_name.split(_SPECIFIC_EXECUTOR_SEPARATOR)
new_key_name = key_split.pop(-1)
executor_name = ''.join(key_split)
return new_key_name, executor_name
def _get_name_from_replicas_name(name: str) -> Tuple[str]:
"""return the original name without the replicas
ex: 'exec1/rep-0' will be transformed into 'exec1'
:param name: name of the DataRequest
:return: return the original name without the replicas
"""
return name.split('/')[0]
def _is_param_for_specific_executor(key_name: str) -> bool:
"""Tell if a key is for a specific Executor
ex: 'key' is for every Executor whereas 'my_executor__key' is only for 'my_executor'
:param key_name: key name of the param
:return: return True if key_name is for specific Executor, False otherwise
"""
if _SPECIFIC_EXECUTOR_SEPARATOR in key_name:
if key_name.startswith(_SPECIFIC_EXECUTOR_SEPARATOR) or key_name.endswith(
_SPECIFIC_EXECUTOR_SEPARATOR
):
return False
return True
else:
return False
def _parse_specific_params(parameters: Dict, executor_name: str):
"""Parse the parameters dictionary to filter executor specific parameters
:param parameters: dictionary containing the parameters
:param executor_name: name of the Executor
:returns: the parsed parameters after applying filtering for the specific Executor
"""
parsed_params = copy.deepcopy(parameters)
for key in parameters:
if _is_param_for_specific_executor(key):
(
key_name,
key_executor_name,
) = _spit_key_and_executor_name(key)
if key_executor_name == executor_name:
parsed_params[key_name] = parameters[key]
del parsed_params[key]
specific_parameters = parameters.get(executor_name, None)
if specific_parameters:
parsed_params.update(**specific_parameters)
return parsed_params
|
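# Hedged worked example (not part of the original module) of the parameter
# helpers above: keys using the '__' separator target a single Executor, and a
# nested dict keyed by the executor name is merged in last. The expected values
# below assume every executor-specific key is removed from the result, whether
# or not it matches the current Executor.
params = {
    'top_k': 10,                       # plain key: applies to every Executor
    'indexer__top_k': 5,               # only for the Executor named 'indexer'
    'encoder__batch_size': 16,         # only for 'encoder'
    'indexer': {'metric': 'cosine'},   # nested executor-specific block
}

print(_is_param_for_specific_executor('indexer__top_k'))  # True
print(_is_param_for_specific_executor('top_k'))           # False
print(_spit_key_and_executor_name('indexer__top_k'))      # ('top_k', 'indexer')
print(_parse_specific_params(params, 'indexer'))
# -> {'top_k': 5, 'indexer': {'metric': 'cosine'}, 'metric': 'cosine'}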
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from ..hnswlib_searcher import HnswlibSearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
@pytest.mark.parametrize(
['metric', 'is_distance'],
[
('l2', True),
('ip', True),
('cosine', True),
('l2', False),
('ip', False),
('cosine', False),
],
)
def test_metric(tmpdir, metric, is_distance):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(
dump_path=DUMP_PATH,
default_top_k=TOP_K,
metas=metas,
metric=metric,
is_distance=is_distance,
runtime_args=runtime_args,
)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert (
docs[0].matches[i].scores[metric].value
>= docs[0].matches[i + 1].scores[metric].value
)
else:
assert (
docs[0].matches[i].scores[metric].value
<= docs[0].matches[i + 1].scores[metric].value
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[1] / 'config.yml'))
assert ex.metric == 'cosine'
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(
dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args
)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_none_doc(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(
dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args
)
indexer.search(None, {})
indexer.fill_embedding(None)
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(
default_top_k=TOP_K, metas=metas, runtime_args=runtime_args
)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
def test_flow(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
flow = Flow().add(
uses=HnswlibSearcher,
uses_with={'dump_path': DUMP_PATH, 'default_top_k': TOP_K},
uses_metas=metas,
runtime_args=runtime_args,
)
with flow:
resp = flow.post(
on='/search',
inputs=DocumentArray([Document(embedding=np.random.random(7))]),
return_results=True,
)
assert len(resp[0].data.docs[0].matches) == TOP_K
doc_array = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
with flow:
resp = flow.post(on='/fill_embedding', inputs=doc_array, return_results=True)
for i, doc in enumerate(resp[0].data.docs):
assert doc.embedding
assert doc.embedding.dense.shape == [7]
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from ..hnswlib_searcher import HnswlibSearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
@pytest.mark.parametrize(['metric', 'is_distance'],
[('l2', True), ('ip', True), ('cosine', True),
('l2', False), ('ip', False), ('cosine', False)])
def test_metric(tmpdir, metric, is_distance):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, metric=metric, is_distance=is_distance, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert docs[0].matches[i].scores[metric].value >= docs[0].matches[i + 1].scores[metric].value
else:
assert docs[0].matches[i].scores[metric].value <= docs[0].matches[i + 1].scores[metric].value
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_none_doc(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
indexer.search(None, {})
indexer.fill_embedding(None)
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = HnswlibSearcher(default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
def test_flow(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
flow = Flow().add(uses=HnswlibSearcher, override_with={'dump_path': DUMP_PATH, 'default_top_k': TOP_K},
override_metas=metas, runtime_args=runtime_args)
with flow:
resp = flow.post(
on='/search',
inputs=DocumentArray([Document(embedding=np.random.random(7))]),
return_results=True
)
assert len(resp[0].data.docs[0].matches) == TOP_K
doc_array = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
with flow:
resp = flow.post(
on='/fill_embedding',
inputs=doc_array,
return_results=True
)
for i, doc in enumerate(resp[0].data.docs):
assert doc.embedding
assert doc.embedding.dense.shape == [7]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestGLIP(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(
['glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.language_model)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([
('glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py', ('cpu',
'cuda'))
])
def test_glip_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
# test custom_entities is True
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
texts=['a', 'b'],
custom_entities=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
# test custom_entities is False
# packed_inputs = demo_mm_inputs(
# 2, [[3, 128, 128], [3, 125, 130]],
# texts=['a', 'b'],
# custom_entities=False)
# data = detector.data_preprocessor(packed_inputs, False)
# # Test forward test
# detector.eval()
# with torch.no_grad():
# batch_results = detector.forward(**data, mode='predict')
# self.assertEqual(len(batch_results), 2)
# self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestGLIP(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(
['glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.language_model)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([
('glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py', ('cpu',
'cuda'))
])
def test_glip_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
# test custom_entities is True
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
texts=['a', 'b'],
custom_entities=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
# test custom_entities is False
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
texts=['a', 'b'],
custom_entities=False)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
import gc
import unittest
import torch
from diffusers import (
StableDiffusionImg2ImgPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionImg2ImgPipeline
ckpt_path = (
"https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
)
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
@slow
@require_torch_accelerator
class StableDiffusion21Img2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionImg2ImgPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
repo_id = "stabilityai/stable-diffusion-2-1"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
|
import gc
import unittest
import torch
from diffusers import (
StableDiffusionImg2ImgPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionImg2ImgPipeline
ckpt_path = (
"https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
)
original_config = (
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class StableDiffusion21Img2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionImg2ImgPipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
repo_id = "stabilityai/stable-diffusion-2-1"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
|
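# Hedged sketch (not part of the original test file) of what the
# SDSingleFileTesterMixin used above compares: the same pipeline loaded from a
# single .safetensors checkpoint and from the Hub repo should produce nearly
# identical outputs for identical inputs. The mixin's exact internals may differ;
# the checkpoint URL and repo id mirror the class attributes above.
import torch
from diffusers import StableDiffusionImg2ImgPipeline

ckpt_path = (
    "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
)
repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"

pipe_single = StableDiffusionImg2ImgPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16)
pipe_pretrained = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
# With the same seed, prompt, and init image, the two pipelines' outputs are
# expected to agree within a small tolerance (the tests above use expected_max_diff=1e-3).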
_base_ = 'tridentnet_r50-caffe_ms-1x_coco.py'
# learning rate
max_epochs = 36
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = 'tridentnet_r50_caffe_mstrain_1x_coco.py'
# learning rate
max_epochs = 36
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
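# Hedged reading (not part of the original config) of the schedule above: a
# linear warm-up over the first 500 iterations (start_factor=0.001), then the
# base lr is multiplied by gamma=0.1 at epochs 28 and 34 of the 36-epoch run.
# A tiny helper to print the epoch-level multiplier (warm-up omitted, as it
# finishes early in epoch 0):
def multistep_multiplier(epoch, milestones=(28, 34), gamma=0.1):
    return gamma ** sum(epoch >= m for m in milestones)

for epoch in (0, 27, 28, 33, 34, 35):
    print(epoch, multistep_multiplier(epoch))  # 1.0 until epoch 27, 0.1 from 28, 0.01 from 34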
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of multiple retrievers."""
retrievers: list[BaseRetriever]
"""A list of retrievers to merge."""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return self.merge_documents(query, run_manager)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return await self.amerge_documents(query, run_manager)
def merge_documents(
self,
query: str,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
async def amerge_documents(
self,
query: str,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*(
retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
),
)
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
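# --- Hypothetical usage sketch (added for illustration; not part of the module) ---
# The merge is a round-robin interleave: the first document of each retriever,
# then the second of each, and so on, skipping retrievers that have run out of
# results. Assuming two already-constructed retrievers `retriever_a` and
# `retriever_b`:
#
#   merger = MergerRetriever(retrievers=[retriever_a, retriever_b])
#   docs = merger.invoke("what does a merger retriever do?")
#   # docs == [a[0], b[0], a[1], b[1], ...] for result lists a and b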
|
import asyncio
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class MergerRetriever(BaseRetriever):
"""Retriever that merges the results of multiple retrievers."""
retrievers: list[BaseRetriever]
"""A list of retrievers to merge."""
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return self.merge_documents(query, run_manager)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""
Asynchronously get the relevant documents for a given query.
Args:
query: The query to search for.
Returns:
A list of relevant documents.
"""
# Merge the results of the retrievers.
return await self.amerge_documents(query, run_manager)
def merge_documents(
self, query: str, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
"""
Merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = [
retriever.invoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
]
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
async def amerge_documents(
self, query: str, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
"""
Asynchronously merge the results of the retrievers.
Args:
query: The query to search for.
Returns:
A list of merged documents.
"""
# Get the results of all retrievers.
retriever_docs = await asyncio.gather(
*(
retriever.ainvoke(
query,
config={"callbacks": run_manager.get_child(f"retriever_{i + 1}")},
)
for i, retriever in enumerate(self.retrievers)
)
)
# Merge the results of the retrievers.
merged_documents = []
max_docs = max(map(len, retriever_docs), default=0)
for i in range(max_docs):
for retriever, doc in zip(self.retrievers, retriever_docs):
if i < len(doc):
merged_documents.append(doc[i])
return merged_documents
|
_base_ = [
'../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator
# inference on val dataset and dump the proposals with evaluate metric
# data_root = 'data/coco/'
# test_evaluator = [
# dict(
# type='DumpProposals',
# output_dir=data_root + 'proposals/',
# proposals_file='rpn_r50_fpn_1x_val2017.pkl'),
# dict(
# type='CocoMetric',
# ann_file=data_root + 'annotations/instances_val2017.json',
# metric='proposal_fast',
# backend_args={{_base_.backend_args}},
# format_only=False)
# ]
# inference on training dataset and dump the proposals without evaluate metric
# data_root = 'data/coco/'
# test_dataloader = dict(
# dataset=dict(
# ann_file='annotations/instances_train2017.json',
# data_prefix=dict(img='train2017/')))
#
# test_evaluator = [
# dict(
# type='DumpProposals',
# output_dir=data_root + 'proposals/',
# proposals_file='rpn_r50_fpn_1x_train2017.pkl'),
# ]
|
_base_ = [
'../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator
# inference on val dataset and dump the proposals with evaluate metric
# data_root = 'data/coco/'
# test_evaluator = [
# dict(
# type='DumpProposals',
# output_dir=data_root + 'proposals/',
# proposals_file='rpn_r50_fpn_1x_val2017.pkl'),
# dict(
# type='CocoMetric',
# ann_file=data_root + 'annotations/instances_val2017.json',
# metric='proposal_fast',
# file_client_args={{_base_.file_client_args}},
# format_only=False)
# ]
# inference on training dataset and dump the proposals without evaluate metric
# data_root = 'data/coco/'
# test_dataloader = dict(
# dataset=dict(
# ann_file='annotations/instances_train2017.json',
# data_prefix=dict(img='train2017/')))
#
# test_evaluator = [
# dict(
# type='DumpProposals',
# output_dir=data_root + 'proposals/',
# proposals_file='rpn_r50_fpn_1x_train2017.pkl'),
# ]
|
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from google.protobuf import __version__ as __pb__version__
else:
protobuf = import_library('google.protobuf', raise_error=True)
__pb__version__ = protobuf.__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocListProto,
DocProto,
DocVecProto,
ListOfAnyProto,
ListOfDocArrayProto,
ListOfDocVecProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocListProto,
DocProto,
DocVecProto,
ListOfAnyProto,
ListOfDocArrayProto,
ListOfDocVecProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocListProto',
'DocProto',
'NdArrayProto',
'NodeProto',
'DocVecProto',
'DocListProto',
'ListOfDocArrayProto',
'ListOfDocVecProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
|
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from google.protobuf import __version__ as __pb__version__
else:
protobuf = import_library('google.protobuf', raise_error=True)
__pb__version__ = protobuf.__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DictOfAnyProto,
DocListProto,
DocProto,
DocVecProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DictOfAnyProto,
DocListProto,
DocProto,
DocVecProto,
ListOfAnyProto,
ListOfDocArrayProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocListProto',
'DocProto',
'NdArrayProto',
'NodeProto',
'DocVecProto',
'DocListProto',
'ListOfDocArrayProto',
'ListOfAnyProto',
'DictOfAnyProto',
]
|
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cannot be downloaded automatically. It is available for research purposes only (CC-BY-NC).
The training procedure can be found in the file make_multilingual.py.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
import csv
from tqdm.autonotebook import tqdm
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
dev_sentences = 1000 # Number of sentences we want to use for development
download_url = "https://sbert.net/datasets/parallel-sentences.tsv.gz" # Specify parallel sentences URL here
parallel_sentences_path = "../datasets/parallel-sentences.tsv.gz" # Path of the parallel-sentences.tsv.gz file.
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(parallel_sentences_path), exist_ok=True)
if not os.path.exists(parallel_sentences_path):
print("parallel-sentences.tsv.gz does not exists. Try to download from server")
sentence_transformers.util.http_get(download_url, parallel_sentences_path)
os.makedirs(parallel_sentences_folder, exist_ok=True)
train_files = []
dev_files = []
files_to_create = []
for source_lang in source_languages:
for target_lang in target_languages:
output_filename_train = os.path.join(
parallel_sentences_folder, "talks-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
output_filename_dev = os.path.join(
parallel_sentences_folder, "talks-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
train_files.append(output_filename_train)
dev_files.append(output_filename_dev)
if not os.path.exists(output_filename_train) or not os.path.exists(output_filename_dev):
files_to_create.append(
{
"src_lang": source_lang,
"trg_lang": target_lang,
"fTrain": gzip.open(output_filename_train, "wt", encoding="utf8"),
"fDev": gzip.open(output_filename_dev, "wt", encoding="utf8"),
"devCount": 0,
}
)
if len(files_to_create) > 0:
print(
"Parallel sentences files {} do not exist. Create these files now".format(
", ".join(map(lambda x: x["src_lang"] + "-" + x["trg_lang"], files_to_create))
)
)
with gzip.open(parallel_sentences_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in tqdm(reader, desc="Sentences"):
for outfile in files_to_create:
src_text = line[outfile["src_lang"]].strip()
trg_text = line[outfile["trg_lang"]].strip()
if src_text != "" and trg_text != "":
if outfile["devCount"] < dev_sentences:
outfile["devCount"] += 1
fOut = outfile["fDev"]
else:
fOut = outfile["fTrain"]
fOut.write("{}\t{}\n".format(src_text, trg_text))
for outfile in files_to_create:
outfile["fTrain"].close()
outfile["fDev"].close()
print("---DONE---")
|
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cannot be downloaded automatically. It is available for research purposes only (CC-BY-NC).
The training procedure can be found in the files make_multilingual.py and make_multilingual_sys.py.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
import csv
from tqdm.autonotebook import tqdm
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
dev_sentences = 1000 # Number of sentences we want to use for development
download_url = "https://sbert.net/datasets/parallel-sentences.tsv.gz" # Specify parallel sentences URL here
parallel_sentences_path = "../datasets/parallel-sentences.tsv.gz" # Path of the parallel-sentences.tsv.gz file.
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(parallel_sentences_path), exist_ok=True)
if not os.path.exists(parallel_sentences_path):
print("parallel-sentences.tsv.gz does not exists. Try to download from server")
sentence_transformers.util.http_get(download_url, parallel_sentences_path)
os.makedirs(parallel_sentences_folder, exist_ok=True)
train_files = []
dev_files = []
files_to_create = []
for source_lang in source_languages:
for target_lang in target_languages:
output_filename_train = os.path.join(
parallel_sentences_folder, "talks-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
output_filename_dev = os.path.join(
parallel_sentences_folder, "talks-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
train_files.append(output_filename_train)
dev_files.append(output_filename_dev)
if not os.path.exists(output_filename_train) or not os.path.exists(output_filename_dev):
files_to_create.append(
{
"src_lang": source_lang,
"trg_lang": target_lang,
"fTrain": gzip.open(output_filename_train, "wt", encoding="utf8"),
"fDev": gzip.open(output_filename_dev, "wt", encoding="utf8"),
"devCount": 0,
}
)
if len(files_to_create) > 0:
print(
"Parallel sentences files {} do not exist. Create these files now".format(
", ".join(map(lambda x: x["src_lang"] + "-" + x["trg_lang"], files_to_create))
)
)
with gzip.open(parallel_sentences_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in tqdm(reader, desc="Sentences"):
for outfile in files_to_create:
src_text = line[outfile["src_lang"]].strip()
trg_text = line[outfile["trg_lang"]].strip()
if src_text != "" and trg_text != "":
if outfile["devCount"] < dev_sentences:
outfile["devCount"] += 1
fOut = outfile["fDev"]
else:
fOut = outfile["fTrain"]
fOut.write("{}\t{}\n".format(src_text, trg_text))
for outfile in files_to_create:
outfile["fTrain"].close()
outfile["fDev"].close()
print("---DONE---")
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.5"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.4.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
"""**Tracers** are classes for tracing runs.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> BaseTracer --> <name>Tracer # Examples: LangChainTracer, RootListenersTracer
--> <name> # Examples: LogStreamCallbackHandler
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.evaluation import EvaluatorCallbackHandler
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.log_stream import (
LogStreamCallbackHandler,
RunLog,
RunLogPatch,
)
from langchain_core.tracers.schemas import Run
from langchain_core.tracers.stdout import ConsoleCallbackHandler
__all__ = [
"BaseTracer",
"EvaluatorCallbackHandler",
"LangChainTracer",
"ConsoleCallbackHandler",
"Run",
"RunLog",
"RunLogPatch",
"LogStreamCallbackHandler",
]
_dynamic_imports = {
"BaseTracer": "base",
"EvaluatorCallbackHandler": "evaluation",
"LangChainTracer": "langchain",
"LogStreamCallbackHandler": "log_stream",
"RunLog": "log_stream",
"RunLogPatch": "log_stream",
"Run": "schemas",
"ConsoleCallbackHandler": "stdout",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
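# --- Hypothetical usage note (added for illustration; not part of the module) ---
# The _dynamic_imports mapping plus the module-level __getattr__ makes these
# imports lazy: a submodule is only imported the first time one of its names is
# accessed, and the result is cached in globals() so later lookups bypass
# __getattr__ entirely.
#
#   from langchain_core import tracers
#   tracer_cls = tracers.LangChainTracer           # imports .langchain on first access
#   assert tracer_cls is tracers.LangChainTracer   # cached in globals(); no re-import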
|
"""**Tracers** are classes for tracing runs.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> BaseTracer --> <name>Tracer # Examples: LangChainTracer, RootListenersTracer
--> <name> # Examples: LogStreamCallbackHandler
""" # noqa: E501
__all__ = [
"BaseTracer",
"EvaluatorCallbackHandler",
"LangChainTracer",
"ConsoleCallbackHandler",
"Run",
"RunLog",
"RunLogPatch",
"LogStreamCallbackHandler",
]
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.evaluation import EvaluatorCallbackHandler
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.log_stream import (
LogStreamCallbackHandler,
RunLog,
RunLogPatch,
)
from langchain_core.tracers.schemas import Run
from langchain_core.tracers.stdout import ConsoleCallbackHandler
|
from typing import TYPE_CHECKING
import torch
if TYPE_CHECKING: # pragma: no cover
from torch import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
    :param x_mat: torch tensor with ndim=2
    :param y_mat: torch tensor with ndim=2
    :param eps: a small jitter to avoid divide by zero
    :param device: the computational device to use, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
x_mat = x_mat.to(device)
y_mat = y_mat.to(device)
a_n, b_n = x_mat.norm(dim=1)[:, None], y_mat.norm(dim=1)[:, None]
a_norm = x_mat / torch.clamp(a_n, min=eps)
b_norm = y_mat / torch.clamp(b_n, min=eps)
sim_mt = 1 - torch.mm(a_norm, b_norm.transpose(0, 1))
return sim_mt.cpu().detach().numpy()
def euclidean(x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu') -> 'numpy.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: torch array with ndim=2
:param y_mat: torch array with ndim=2
    :param device: the computational device to use, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
x_mat = x_mat.to(device)
y_mat = y_mat.to(device)
return torch.cdist(x_mat, y_mat).cpu().detach().numpy()
def sqeuclidean(
x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: torch array with ndim=2
:param y_mat: torch array with ndim=2
    :param device: the computational device to use, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
x_mat = x_mat.to(device)
y_mat = y_mat.to(device)
return (torch.cdist(x_mat, y_mat) ** 2).cpu().detach().numpy()
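if __name__ == '__main__':
    # Minimal smoke test (added for illustration; not part of the original module).
    # For the identity rows e1 and e2, the cosine distance of a vector with itself
    # is 0 and with an orthogonal vector is 1; euclidean/sqeuclidean behave alike.
    e = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    print(cosine(e, e))       # ~[[0., 1.], [1., 0.]]
    print(euclidean(e, e))    # ~[[0., 1.414], [1.414, 0.]]
    print(sqeuclidean(e, e))  # ~[[0., 2.], [2., 0.]]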
|
from typing import TYPE_CHECKING
import torch
if TYPE_CHECKING:
from torch import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
    :param x_mat: torch tensor with ndim=2
    :param y_mat: torch tensor with ndim=2
    :param eps: a small jitter to avoid divide by zero
    :param device: the computational device to use, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
x_mat = x_mat.to(device)
y_mat = y_mat.to(device)
a_n, b_n = x_mat.norm(dim=1)[:, None], y_mat.norm(dim=1)[:, None]
a_norm = x_mat / torch.clamp(a_n, min=eps)
b_norm = y_mat / torch.clamp(b_n, min=eps)
sim_mt = 1 - torch.mm(a_norm, b_norm.transpose(0, 1))
return sim_mt.cpu().detach().numpy()
def euclidean(x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu') -> 'numpy.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: torch array with ndim=2
:param y_mat: torch array with ndim=2
    :param device: the computational device to use, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
x_mat = x_mat.to(device)
y_mat = y_mat.to(device)
return torch.cdist(x_mat, y_mat).cpu().detach().numpy()
def sqeuclidean(
x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: torch array with ndim=2
:param y_mat: torch array with ndim=2
    :param device: the computational device to use, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
x_mat = x_mat.to(device)
y_mat = y_mat.to(device)
return (torch.cdist(x_mat, y_mat) ** 2).cpu().detach().numpy()
|
from uuid import UUID
import pytest
from pydantic import schema_json_of
from pydantic.tools import parse_obj_as
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ID
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_id_validation(id):
parsed_id = parse_obj_as(ID, id)
assert parsed_id == str(id)
def test_json_schema():
schema_json_of(ID)
def test_dump_json():
id = parse_obj_as(ID, 1234)
orjson_dumps(id)
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_operators(id):
parsed_id = parse_obj_as(ID, id)
assert parsed_id == str(id)
assert parsed_id != 'aljdñjd'
assert str(id)[0:1] in parsed_id
assert 'docarray' not in parsed_id
|
from uuid import UUID
import pytest
from pydantic import schema_json_of
from pydantic.tools import parse_obj_as
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ID
@pytest.mark.parametrize(
'id', ['1234', 1234, UUID('cf57432e-809e-4353-adbd-9d5c0d733868')]
)
def test_id_validation(id):
parsed_id = parse_obj_as(ID, id)
assert parsed_id == str(id)
def test_json_schema():
schema_json_of(ID)
def test_dump_json():
id = parse_obj_as(ID, 1234)
orjson_dumps(id)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STSbenchmark dataset from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STSbenchmark dataset from scratch. It generates sentence embeddings
that can be compared using cosine-similarity to measure the similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
import unittest
import torch
from mmengine.config import Config
from mmengine.data import InstanceData
from mmengine.testing import assert_allclose
from mmdet.evaluation import INSTANCE_OFFSET
from mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead
class TestHeuristicFusionHead(unittest.TestCase):
def test_loss(self):
head = HeuristicFusionHead(num_things_classes=2, num_stuff_classes=2)
result = head.loss()
self.assertTrue(not head.with_loss)
self.assertDictEqual(result, dict())
def test_predict(self):
test_cfg = Config(dict(mask_overlap=0.5, stuff_area_limit=1))
head = HeuristicFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
mask_results = InstanceData()
mask_results.bboxes = torch.tensor([[0, 0, 1, 1], [1, 1, 2, 2]])
mask_results.labels = torch.tensor([0, 1])
mask_results.scores = torch.tensor([0.8, 0.7])
mask_results.masks = torch.tensor([[[1, 0], [0, 0]], [[0, 0],
[0, 1]]]).bool()
seg_preds_list = [
torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
[[0.6, 0.1], [0.1, 0.8]]])
]
target_list = [
torch.tensor([[0 + 1 * INSTANCE_OFFSET, 2],
[3, 1 + 2 * INSTANCE_OFFSET]])
]
results_list = head.predict([mask_results], seg_preds_list)
for target, result in zip(target_list, results_list):
assert_allclose(result.sem_seg[0], target)
# test with no thing
head = HeuristicFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
mask_results = InstanceData()
mask_results.bboxes = torch.zeros((0, 4))
mask_results.labels = torch.zeros((0, )).long()
mask_results.scores = torch.zeros((0, ))
mask_results.masks = torch.zeros((0, 2, 2), dtype=torch.bool)
seg_preds_list = [
torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
[[0.6, 0.1], [0.1, 0.8]]])
]
target_list = [torch.tensor([[4, 2], [3, 4]])]
results_list = head.predict([mask_results], seg_preds_list)
for target, result in zip(target_list, results_list):
assert_allclose(result.sem_seg[0], target)
|
import unittest
import torch
from mmengine.config import Config
from mmengine.data import InstanceData
from mmengine.testing import assert_allclose
from mmdet.core.evaluation import INSTANCE_OFFSET
from mmdet.models.seg_heads.panoptic_fusion_heads import HeuristicFusionHead
class TestHeuristicFusionHead(unittest.TestCase):
def test_loss(self):
head = HeuristicFusionHead(num_things_classes=2, num_stuff_classes=2)
result = head.loss()
self.assertTrue(not head.with_loss)
self.assertDictEqual(result, dict())
def test_predict(self):
test_cfg = Config(dict(mask_overlap=0.5, stuff_area_limit=1))
head = HeuristicFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
mask_results = InstanceData()
mask_results.bboxes = torch.tensor([[0, 0, 1, 1], [1, 1, 2, 2]])
mask_results.labels = torch.tensor([0, 1])
mask_results.scores = torch.tensor([0.8, 0.7])
mask_results.masks = torch.tensor([[[1, 0], [0, 0]], [[0, 0],
[0, 1]]]).bool()
seg_preds_list = [
torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
[[0.6, 0.1], [0.1, 0.8]]])
]
target_list = [
torch.tensor([[0 + 1 * INSTANCE_OFFSET, 2],
[3, 1 + 2 * INSTANCE_OFFSET]])
]
results_list = head.predict([mask_results], seg_preds_list)
for target, result in zip(target_list, results_list):
assert_allclose(result.sem_seg[0], target)
# test with no thing
head = HeuristicFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
mask_results = InstanceData()
mask_results.bboxes = torch.zeros((0, 4))
mask_results.labels = torch.zeros((0, )).long()
mask_results.scores = torch.zeros((0, ))
mask_results.masks = torch.zeros((0, 2, 2), dtype=torch.bool)
seg_preds_list = [
torch.tensor([[[0.2, 0.7], [0.3, 0.1]], [[0.2, 0.2], [0.6, 0.1]],
[[0.6, 0.1], [0.1, 0.8]]])
]
target_list = [torch.tensor([[4, 2], [3, 4]])]
results_list = head.predict([mask_results], seg_preds_list)
for target, result in zip(target_list, results_list):
assert_allclose(result.sem_seg[0], target)
|
"""Markdown node parser."""
import re
from typing import Any, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
class MarkdownNodeParser(NodeParser):
"""Markdown node parser.
Splits a document into Nodes using Markdown header-based splitting logic.
Each node contains its text content and the path of headers leading to it.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "MarkdownNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document by splitting on headers."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
markdown_nodes = []
lines = text.split("\n")
current_section = ""
# Keep track of (markdown level, text) for headers
header_stack: List[tuple[int, str]] = []
code_block = False
for line in lines:
# Track if we're inside a code block to avoid parsing headers in code
if line.lstrip().startswith("```"):
code_block = not code_block
current_section += line + "\n"
continue
# Only parse headers if we're not in a code block
if not code_block:
header_match = re.match(r"^(#+)\s(.*)", line)
if header_match:
# Save the previous section before starting a new one
if current_section.strip():
markdown_nodes.append(
self._build_node_from_split(
current_section.strip(),
node,
"/".join(h[1] for h in header_stack[:-1]),
)
)
header_level = len(header_match.group(1))
header_text = header_match.group(2)
# Compare against top-of-stack item’s markdown level.
# Pop headers of equal or higher markdown level; not necessarily current stack size / depth.
# Hierarchy depth gets deeper one level at a time, but markdown headers can jump from H1 to H3, for example.
while header_stack and header_stack[-1][0] >= header_level:
header_stack.pop()
# Add the new header
header_stack.append((header_level, header_text))
current_section = "#" * header_level + f" {header_text}\n"
continue
current_section += line + "\n"
# Add the final section
if current_section.strip():
markdown_nodes.append(
self._build_node_from_split(
current_section.strip(),
node,
"/".join(h[1] for h in header_stack[:-1]),
)
)
return markdown_nodes
def _build_node_from_split(
self,
text_split: str,
node: BaseNode,
header_path: str,
) -> TextNode:
"""Build node from single text split."""
node = build_nodes_from_splits([text_split], node, id_func=self.id_func)[0]
if self.include_metadata:
node.metadata["header_path"] = (
"/" + header_path + "/" if header_path else "/"
)
return node
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
"""Parse nodes."""
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
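# --- Hypothetical usage sketch (added for illustration; not part of the module) ---
# Because popping compares markdown header levels rather than stack depth, a jump
# from "# A" straight to "### B" keeps "A" on the stack, so the "### B" section is
# filed under header_path "/A/".
#
#   from llama_index.core.schema import Document
#
#   parser = MarkdownNodeParser.from_defaults()
#   nodes = parser.get_nodes_from_node(Document(text="# A\nintro\n### B\ndetails\n"))
#   print([n.metadata["header_path"] for n in nodes])  # ['/', '/A/']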
|
"""Markdown node parser."""
import re
from typing import Any, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
class MarkdownNodeParser(NodeParser):
"""Markdown node parser.
Splits a document into Nodes using Markdown header-based splitting logic.
Each node contains its text content and the path of headers leading to it.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "MarkdownNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document by splitting on headers."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
markdown_nodes = []
lines = text.split("\n")
current_section = ""
# Keep track of headers at each level
header_stack: List[str] = []
code_block = False
for line in lines:
# Track if we're inside a code block to avoid parsing headers in code
if line.lstrip().startswith("```"):
code_block = not code_block
current_section += line + "\n"
continue
# Only parse headers if we're not in a code block
if not code_block:
header_match = re.match(r"^(#+)\s(.*)", line)
if header_match:
# Save the previous section before starting a new one
if current_section.strip():
markdown_nodes.append(
self._build_node_from_split(
current_section.strip(),
node,
"/".join(header_stack[:-1]) if header_stack else "",
)
)
level = len(header_match.group(1))
header_text = header_match.group(2)
# Pop headers of equal or higher level
while header_stack and len(header_stack) >= level:
header_stack.pop()
# Add the new header
header_stack.append(header_text)
current_section = "#" * level + f" {header_text}\n"
continue
current_section += line + "\n"
# Add the final section
if current_section.strip():
markdown_nodes.append(
self._build_node_from_split(
current_section.strip(),
node,
"/".join(header_stack[:-1]) if header_stack else "",
)
)
return markdown_nodes
def _build_node_from_split(
self,
text_split: str,
node: BaseNode,
header_path: str,
) -> TextNode:
"""Build node from single text split."""
node = build_nodes_from_splits([text_split], node, id_func=self.id_func)[0]
if self.include_metadata:
node.metadata["header_path"] = (
"/" + header_path + "/" if header_path else "/"
)
return node
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
"""Parse nodes."""
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
|
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
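# --- Hypothetical usage sketch (added for illustration; not part of the module) ---
# With a toy tokenize_fn, two rows with "sentence1"/"sentence2"/"score" columns
# collate into a "label" tensor (taken from the "score" column) plus one
# "{column}_input_ids" entry per remaining text column.
#
#   def fake_tokenize(texts):
#       return {"input_ids": torch.zeros(len(texts), 4, dtype=torch.long)}
#
#   collator = SentenceTransformerDataCollator(tokenize_fn=fake_tokenize)
#   batch = collator([
#       {"sentence1": "a", "sentence2": "b", "score": 0.7},
#       {"sentence1": "c", "sentence2": "d", "score": 0.2},
#   ])
#   sorted(batch)  # ['label', 'sentence1_input_ids', 'sentence2_input_ids']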
|
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Defaults to 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
        draw_gt (bool): Whether to draw the ground truth. Defaults to True.
        draw_pred (bool): Whether to draw the predicted result.
            Defaults to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
import pytest
from llama_index.embeddings.openai.utils import (
DEFAULT_OPENAI_API_BASE,
DEFAULT_OPENAI_API_VERSION,
MISSING_API_KEY_ERROR_MESSAGE,
resolve_openai_credentials,
validate_openai_api_key,
)
def test_validate_openai_api_key_with_valid_key() -> None:
validate_openai_api_key("valid_api_key")
def test_validate_openai_api_key_with_env_var(monkeypatch) -> None:
monkeypatch.setenv("OPENAI_API_KEY", "valid_api_key")
validate_openai_api_key()
def test_validate_openai_api_key_with_no_key(monkeypatch) -> None:
monkeypatch.setenv("OPENAI_API_KEY", "")
with pytest.raises(ValueError, match=MISSING_API_KEY_ERROR_MESSAGE):
validate_openai_api_key()
def test_validate_openai_api_key_with_empty_env_var(monkeypatch) -> None:
monkeypatch.setenv("OPENAI_API_KEY", "")
with pytest.raises(ValueError, match=MISSING_API_KEY_ERROR_MESSAGE):
validate_openai_api_key()
def test_resolve_openai_credentials_with_params() -> None:
api_key, api_base, api_version = resolve_openai_credentials(
api_key="param_api_key",
api_base="param_api_base",
api_version="param_api_version",
)
assert api_key == "param_api_key"
assert api_base == "param_api_base"
assert api_version == "param_api_version"
def test_resolve_openai_credentials_with_env_vars(monkeypatch) -> None:
monkeypatch.setenv("OPENAI_API_KEY", "env_api_key")
monkeypatch.setenv("OPENAI_API_BASE", "env_api_base")
monkeypatch.setenv("OPENAI_API_VERSION", "env_api_version")
api_key, api_base, api_version = resolve_openai_credentials()
assert api_key == "env_api_key"
assert api_base == "env_api_base"
assert api_version == "env_api_version"
def test_resolve_openai_credentials_with_openai_module(monkeypatch) -> None:
monkeypatch.setattr("openai.base_url", "module_api_base")
monkeypatch.setattr("openai.api_version", "module_api_version")
api_key, api_base, api_version = resolve_openai_credentials()
assert api_base == "module_api_base"
assert api_version == "module_api_version"
def test_resolve_openai_credentials_with_defaults() -> None:
api_key, api_base, api_version = resolve_openai_credentials()
assert api_base == DEFAULT_OPENAI_API_BASE
assert api_version == DEFAULT_OPENAI_API_VERSION
|
import pytest
from llama_index.embeddings.openai.utils import (
resolve_openai_credentials,
validate_openai_api_key,
MISSING_API_KEY_ERROR_MESSAGE,
DEFAULT_OPENAI_API_BASE,
DEFAULT_OPENAI_API_VERSION,
)
def test_validate_openai_api_key_with_valid_key() -> None:
validate_openai_api_key("valid_api_key")
def test_validate_openai_api_key_with_env_var(monkeypatch) -> None:
monkeypatch.setenv("OPENAI_API_KEY", "valid_api_key")
validate_openai_api_key()
def test_validate_openai_api_key_with_no_key() -> None:
with pytest.raises(ValueError, match=MISSING_API_KEY_ERROR_MESSAGE):
validate_openai_api_key()
def test_validate_openai_api_key_with_empty_env_var(monkeypatch) -> None:
monkeypatch.setenv("OPENAI_API_KEY", "")
with pytest.raises(ValueError, match=MISSING_API_KEY_ERROR_MESSAGE):
validate_openai_api_key()
def test_resolve_openai_credentials_with_params() -> None:
api_key, api_base, api_version = resolve_openai_credentials(
api_key="param_api_key",
api_base="param_api_base",
api_version="param_api_version",
)
assert api_key == "param_api_key"
assert api_base == "param_api_base"
assert api_version == "param_api_version"
def test_resolve_openai_credentials_with_env_vars(monkeypatch) -> None:
monkeypatch.setenv("OPENAI_API_KEY", "env_api_key")
monkeypatch.setenv("OPENAI_API_BASE", "env_api_base")
monkeypatch.setenv("OPENAI_API_VERSION", "env_api_version")
api_key, api_base, api_version = resolve_openai_credentials()
assert api_key == "env_api_key"
assert api_base == "env_api_base"
assert api_version == "env_api_version"
def test_resolve_openai_credentials_with_openai_module(monkeypatch) -> None:
monkeypatch.setattr("openai.api_key", "module_api_key")
monkeypatch.setattr("openai.base_url", "module_api_base")
monkeypatch.setattr("openai.api_version", "module_api_version")
api_key, api_base, api_version = resolve_openai_credentials()
assert api_key == "module_api_key"
assert api_base == "module_api_base"
assert api_version == "module_api_version"
def test_resolve_openai_credentials_with_defaults() -> None:
api_key, api_base, api_version = resolve_openai_credentials()
assert api_key == ""
assert api_base == DEFAULT_OPENAI_API_BASE
assert api_version == DEFAULT_OPENAI_API_VERSION
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput, OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets
# --8<-- [start:GoogleOAuthIsConfigured]
secrets = Secrets()
GOOGLE_OAUTH_IS_CONFIGURED = bool(
secrets.google_client_id and secrets.google_client_secret
)
# --8<-- [end:GoogleOAuthIsConfigured]
GoogleCredentials = OAuth2Credentials
GoogleCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.GOOGLE], Literal["oauth2"]
]
def GoogleCredentialsField(scopes: list[str]) -> GoogleCredentialsInput:
"""
Creates a Google credentials input on a block.
Params:
scopes: The authorization scopes needed for the block to work.
"""
return CredentialsField(
required_scopes=set(scopes),
description="The Google integration requires OAuth2 authentication.",
)
TEST_CREDENTIALS = OAuth2Credentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="google",
access_token=SecretStr("mock-google-access-token"),
refresh_token=SecretStr("mock-google-refresh-token"),
access_token_expires_at=1234567890,
scopes=[
"https://www.googleapis.com/auth/gmail.readonly",
"https://www.googleapis.com/auth/gmail.send",
],
title="Mock Google OAuth2 Credentials",
username="mock-google-username",
refresh_token_expires_at=1234567890,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput, OAuth2Credentials
from backend.util.settings import Secrets
# --8<-- [start:GoogleOAuthIsConfigured]
secrets = Secrets()
GOOGLE_OAUTH_IS_CONFIGURED = bool(
secrets.google_client_id and secrets.google_client_secret
)
# --8<-- [end:GoogleOAuthIsConfigured]
GoogleCredentials = OAuth2Credentials
GoogleCredentialsInput = CredentialsMetaInput[Literal["google"], Literal["oauth2"]]
def GoogleCredentialsField(scopes: list[str]) -> GoogleCredentialsInput:
"""
Creates a Google credentials input on a block.
Params:
scopes: The authorization scopes needed for the block to work.
"""
return CredentialsField(
provider="google",
supported_credential_types={"oauth2"},
required_scopes=set(scopes),
description="The Google integration requires OAuth2 authentication.",
)
TEST_CREDENTIALS = OAuth2Credentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="google",
access_token=SecretStr("mock-google-access-token"),
refresh_token=SecretStr("mock-google-refresh-token"),
access_token_expires_at=1234567890,
scopes=[
"https://www.googleapis.com/auth/gmail.readonly",
"https://www.googleapis.com/auth/gmail.send",
],
title="Mock Google OAuth2 Credentials",
username="mock-google-username",
refresh_token_expires_at=1234567890,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
|
from typing import List, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.you import YouSearchAPIWrapper
class YouInput(BaseModel):
"""Input schema for the you.com tool."""
query: str = Field(description="should be a search query")
class YouSearchTool(BaseTool):
"""Tool that searches the you.com API."""
name: str = "you_search"
description: str = (
"The YOU APIs make LLMs and search experiences more factual and"
"up to date with realtime web data."
)
args_schema: Type[BaseModel] = YouInput
api_wrapper: YouSearchAPIWrapper = Field(default_factory=YouSearchAPIWrapper)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Document]:
"""Use the you.com tool."""
return self.api_wrapper.results(query)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> List[Document]:
"""Use the you.com tool asynchronously."""
return await self.api_wrapper.results_async(query)
|
from typing import List, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.you import YouSearchAPIWrapper
class YouInput(BaseModel):
"""Input schema for the you.com tool."""
query: str = Field(description="should be a search query")
class YouSearchTool(BaseTool): # type: ignore[override, override]
"""Tool that searches the you.com API."""
name: str = "you_search"
description: str = (
"The YOU APIs make LLMs and search experiences more factual and"
"up to date with realtime web data."
)
args_schema: Type[BaseModel] = YouInput
api_wrapper: YouSearchAPIWrapper = Field(default_factory=YouSearchAPIWrapper)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> List[Document]:
"""Use the you.com tool."""
return self.api_wrapper.results(query)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> List[Document]:
"""Use the you.com tool asynchronously."""
return await self.api_wrapper.results_async(query)
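# --- Illustrative usage sketch (not part of the original file) ---
# Assumes a valid you.com API key is available via the YDC_API_KEY environment
# variable; the query string is made up for the example.
if __name__ == "__main__":
    tool = YouSearchTool()
    documents = tool.run("latest developments in retrieval augmented generation")
    for document in documents[:3]:
        print(document.page_content[:200])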
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset names to Router routes, like "query" or "document". This is used to specify which
Router module to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of dataset names to routes for single-dataset training/evaluation.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter names to learning rates. This allows you to set different learning rates for
different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is useful when you want to
fine-tune specific parts of the model with different learning rates.
"""
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len({tuple(x.values()) for ds in datasets_per_rank for x in ds}) == full_size
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
        (2, 2),  # each node has 2 shards and each worker has 1 shard
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
|
import os
import sys
from pathlib import Path
import pytest
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
from .utils import execute_subprocess_async, get_torch_dist_unique_port, require_torch
def test_split_dataset_by_node_map_style():
full_ds = Dataset.from_dict({"i": range(17)})
full_size = len(full_ds)
world_size = 3
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(ds) for ds in datasets_per_rank) == full_size
assert len(set(tuple(x.values()) for ds in datasets_per_rank for x in ds)) == full_size
def test_split_dataset_by_node_iterable():
def gen():
return ({"i": i} for i in range(17))
world_size = 3
full_ds = IterableDataset.from_generator(gen)
full_size = len(list(full_ds))
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len(set(tuple(x.values()) for ds in datasets_per_rank for x in ds)) == full_size
@pytest.mark.parametrize("shards_per_node", [1, 2, 3])
def test_split_dataset_by_node_iterable_sharded(shards_per_node):
def gen(shards):
for shard in shards:
yield from ({"i": i, "shard": shard} for i in range(17))
world_size = 3
num_shards = shards_per_node * world_size
gen_kwargs = {"shards": [f"shard_{shard_idx}.txt" for shard_idx in range(num_shards)]}
full_ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
full_size = len(list(full_ds))
assert full_ds.n_shards == world_size * shards_per_node
datasets_per_rank = [
split_dataset_by_node(full_ds, rank=rank, world_size=world_size) for rank in range(world_size)
]
assert [ds.n_shards for ds in datasets_per_rank] == [shards_per_node] * world_size
assert sum(len(list(ds)) for ds in datasets_per_rank) == full_size
assert len(set(tuple(x.values()) for ds in datasets_per_rank for x in ds)) == full_size
@pytest.mark.parametrize("streaming", [False, True])
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch(streaming):
nproc_per_node = 2
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
@pytest.mark.parametrize(
"nproc_per_node, num_workers",
[
        (2, 2),  # each node has 2 shards and each worker has 1 shard
(3, 2), # each node uses all the shards but skips examples, and each worker has 2 shards
],
)
@require_torch
@pytest.mark.skipif(os.name == "nt", reason="execute_subprocess_async doesn't support windows")
@pytest.mark.integration
def test_torch_distributed_launch_streaming_with_num_workers(nproc_per_node, num_workers):
streaming = True
master_port = get_torch_dist_unique_port()
test_script = Path(__file__).resolve().parent / "distributed_scripts" / "test_torch_distributed_launch.py"
distributed_args = f"""
-m torch.distributed.launch
--nproc_per_node={nproc_per_node}
--master_port={master_port}
{test_script}
""".split()
args = f"""
--streaming={streaming}
--num_workers={num_workers}
""".split()
cmd = [sys.executable] + distributed_args + args
execute_subprocess_async(cmd, env=os.environ.copy())
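# --- Illustrative usage sketch (not part of the original file) ---
# split_dataset_by_node gives each rank a disjoint portion of the dataset, which is
# what the assertions above verify; the toy dataset and world size are made up.
if __name__ == "__main__":
    ds = Dataset.from_dict({"i": list(range(10))})
    rank0 = split_dataset_by_node(ds, rank=0, world_size=2)
    rank1 = split_dataset_by_node(ds, rank=1, world_size=2)
    print(len(rank0), len(rank1))  # together the two ranks cover all 10 rows exactly once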
|
STRUCTURED_FORMAT_INSTRUCTIONS = """The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```":
```json
{{
{format}
}}
```""" # noqa: E501
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS = """
```json
{{
{format}
}}
```"""
PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
YAML_FORMAT_INSTRUCTIONS = """The output should be formatted as a YAML instance that conforms to the given JSON schema below.
# Examples
## Schema
```
{{"title": "Players", "description": "A list of players", "type": "array", "items": {{"$ref": "#/definitions/Player"}}, "definitions": {{"Player": {{"title": "Player", "type": "object", "properties": {{"name": {{"title": "Name", "description": "Player name", "type": "string"}}, "avg": {{"title": "Avg", "description": "Batting average", "type": "number"}}}}, "required": ["name", "avg"]}}}}}}
```
## Well formatted instance
```
- name: John Doe
avg: 0.3
- name: Jane Maxfield
avg: 1.4
```
## Schema
```
{{"properties": {{"habit": {{ "description": "A common daily habit", "type": "string" }}, "sustainable_alternative": {{ "description": "An environmentally friendly alternative to the habit", "type": "string"}}}}, "required": ["habit", "sustainable_alternative"]}}
```
## Well formatted instance
```
habit: Using disposable water bottles for daily hydration.
sustainable_alternative: Switch to a reusable water bottle to reduce plastic waste and decrease your environmental footprint.
```
Please follow the standard YAML formatting conventions with an indent of 2 spaces and make sure that the data types adhere strictly to the following JSON schema:
```
{schema}
```
Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!""" # noqa: E501
PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS = """The output should be formatted as a string as the operation, followed by a colon, followed by the column or row to be queried on, followed by optional array parameters.
1. The column names are limited to the possible columns below.
2. Arrays must either be a comma-separated list of numbers formatted as [1,3,5], or a range of numbers formatted as [0..4].
3. Remember that arrays are optional and not necessarily required.
4. If the column is not in the possible columns or the operation is not a valid Pandas DataFrame operation, return why it is invalid as a sentence starting with either "Invalid column" or "Invalid operation".
As an example, for the formats:
1. String "column:num_legs" is a well-formatted instance which gets the column num_legs, where num_legs is a possible column.
2. String "row:1" is a well-formatted instance which gets row 1.
3. String "column:num_legs[1,2]" is a well-formatted instance which gets the column num_legs for rows 1 and 2, where num_legs is a possible column.
4. String "row:1[num_legs]" is a well-formatted instance which gets row 1, but for just column num_legs, where num_legs is a possible column.
5. String "mean:num_legs[1..3]" is a well-formatted instance which takes the mean of num_legs from rows 1 to 3, where num_legs is a possible column and mean is a valid Pandas DataFrame operation.
6. String "do_something:num_legs" is a badly-formatted instance, where do_something is not a valid Pandas DataFrame operation.
7. String "mean:invalid_col" is a badly-formatted instance, where invalid_col is not a possible column.
Here are the possible columns:
```
{columns}
```
""" # noqa: E501
|
# flake8: noqa
STRUCTURED_FORMAT_INSTRUCTIONS = """The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```":
```json
{{
{format}
}}
```"""
STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS = """
```json
{{
{format}
}}
```"""
PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```"""
YAML_FORMAT_INSTRUCTIONS = """The output should be formatted as a YAML instance that conforms to the given JSON schema below.
# Examples
## Schema
```
{{"title": "Players", "description": "A list of players", "type": "array", "items": {{"$ref": "#/definitions/Player"}}, "definitions": {{"Player": {{"title": "Player", "type": "object", "properties": {{"name": {{"title": "Name", "description": "Player name", "type": "string"}}, "avg": {{"title": "Avg", "description": "Batting average", "type": "number"}}}}, "required": ["name", "avg"]}}}}}}
```
## Well formatted instance
```
- name: John Doe
avg: 0.3
- name: Jane Maxfield
avg: 1.4
```
## Schema
```
{{"properties": {{"habit": {{ "description": "A common daily habit", "type": "string" }}, "sustainable_alternative": {{ "description": "An environmentally friendly alternative to the habit", "type": "string"}}}}, "required": ["habit", "sustainable_alternative"]}}
```
## Well formatted instance
```
habit: Using disposable water bottles for daily hydration.
sustainable_alternative: Switch to a reusable water bottle to reduce plastic waste and decrease your environmental footprint.
```
Please follow the standard YAML formatting conventions with an indent of 2 spaces and make sure that the data types adhere strictly to the following JSON schema:
```
{schema}
```
Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!"""
PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS = """The output should be formatted as a string as the operation, followed by a colon, followed by the column or row to be queried on, followed by optional array parameters.
1. The column names are limited to the possible columns below.
2. Arrays must either be a comma-separated list of numbers formatted as [1,3,5], or a range of numbers formatted as [0..4].
3. Remember that arrays are optional and not necessarily required.
4. If the column is not in the possible columns or the operation is not a valid Pandas DataFrame operation, return why it is invalid as a sentence starting with either "Invalid column" or "Invalid operation".
As an example, for the formats:
1. String "column:num_legs" is a well-formatted instance which gets the column num_legs, where num_legs is a possible column.
2. String "row:1" is a well-formatted instance which gets row 1.
3. String "column:num_legs[1,2]" is a well-formatted instance which gets the column num_legs for rows 1 and 2, where num_legs is a possible column.
4. String "row:1[num_legs]" is a well-formatted instance which gets row 1, but for just column num_legs, where num_legs is a possible column.
5. String "mean:num_legs[1..3]" is a well-formatted instance which takes the mean of num_legs from rows 1 to 3, where num_legs is a possible column and mean is a valid Pandas DataFrame operation.
6. String "do_something:num_legs" is a badly-formatted instance, where do_something is not a valid Pandas DataFrame operation.
7. String "mean:invalid_col" is a badly-formatted instance, where invalid_col is not a possible column.
Here are the possible columns:
```
{columns}
```
"""
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
import tempfile
import unittest
from typing import List
from accelerate.utils import write_basic_config
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class ExamplesTestsAccelerate(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._tmpdir = tempfile.mkdtemp()
cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
write_basic_config(save_location=cls.configPath)
cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
import tempfile
import unittest
from typing import List
from accelerate.utils import write_basic_config
# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
pass
def run_command(command: List[str], return_stdout=False):
"""
Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
if an error occurred while running `command`
"""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(output, "decode"):
output = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
) from e
class ExamplesTestsAccelerate(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._tmpdir = tempfile.mkdtemp()
cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
write_basic_config(save_location=cls.configPath)
cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
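# --- Illustrative usage sketch (not part of the original file) ---
# run_command works with any argv-style list; inside the test class it is typically
# combined with cls._launch_args. The tiny subprocess below is a made-up example.
if __name__ == "__main__":
    import sys

    print(run_command([sys.executable, "-c", "print('hello from a subprocess')"], return_stdout=True))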
|
"""Test Baichuan Text Embedding."""
from langchain_community.embeddings.baichuan import BaichuanTextEmbeddings
def test_baichuan_embedding_documents() -> None:
"""Test Baichuan Text Embedding for documents."""
documents = ["今天天气不错", "今天阳光灿烂"]
embedding = BaichuanTextEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2 # type: ignore[arg-type]
assert len(output[0]) == 1024 # type: ignore[index]
def test_baichuan_embedding_query() -> None:
"""Test Baichuan Text Embedding for query."""
document = "所有的小学生都会学过只因兔同笼问题。"
embedding = BaichuanTextEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1024 # type: ignore[arg-type]
def test_baichuan_embeddings_multi_documents() -> None:
"""Test Baichuan Text Embedding for documents with multi texts."""
document = "午餐吃了螺蛳粉"
doc_amount = 35
embeddings = BaichuanTextEmbeddings()
output = embeddings.embed_documents([document] * doc_amount)
assert len(output) == doc_amount # type: ignore[arg-type]
assert len(output[0]) == 1024 # type: ignore[index]
|
"""Test Baichuan Text Embedding."""
from langchain_community.embeddings.baichuan import BaichuanTextEmbeddings
def test_baichuan_embedding_documents() -> None:
"""Test Baichuan Text Embedding for documents."""
documents = ["今天天气不错", "今天阳光灿烂"]
embedding = BaichuanTextEmbeddings() # type: ignore[call-arg]
output = embedding.embed_documents(documents)
assert len(output) == 2 # type: ignore[arg-type]
assert len(output[0]) == 1024 # type: ignore[index]
def test_baichuan_embedding_query() -> None:
"""Test Baichuan Text Embedding for query."""
document = "所有的小学生都会学过只因兔同笼问题。"
embedding = BaichuanTextEmbeddings() # type: ignore[call-arg]
output = embedding.embed_query(document)
assert len(output) == 1024 # type: ignore[arg-type]
def test_baichuan_embeddings_multi_documents() -> None:
"""Test Baichuan Text Embedding for documents with multi texts."""
document = "午餐吃了螺蛳粉"
doc_amount = 35
embeddings = BaichuanTextEmbeddings() # type: ignore[call-arg]
output = embeddings.embed_documents([document] * doc_amount)
assert len(output) == doc_amount # type: ignore[arg-type]
assert len(output[0]) == 1024 # type: ignore[index]
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {"api_key": "test_api_key"}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"FIREWORKS_API_KEY": "api_key",
"FIREWORKS_API_BASE": "https://base.com",
},
{},
{
"fireworks_api_key": "api_key",
"fireworks_api_base": "https://base.com",
},
)
|
"""Standard LangChain interface tests"""
from typing import Tuple, Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_fireworks import ChatFireworks
class TestFireworksStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatFireworks
@property
def chat_model_params(self) -> dict:
return {"api_key": "test_api_key"}
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"FIREWORKS_API_KEY": "api_key",
"FIREWORKS_API_BASE": "https://base.com",
},
{},
{
"fireworks_api_key": "api_key",
"fireworks_api_base": "https://base.com",
},
)
|
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This
example showcases the core functionality of the new ``torchvision.transforms.v2`` API.
"""
import pathlib
import torch
import torchvision
def load_data():
from torchvision.io import read_image
from torchvision import datapoints
from torchvision.ops import masks_to_boxes
assets_directory = pathlib.Path("assets")
path = assets_directory / "FudanPed00054.png"
image = datapoints.Image(read_image(str(path)))
merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png"))
labels = torch.unique(merged_masks)[1:]
masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1))
bounding_boxes = datapoints.BoundingBoxes(
masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, canvas_size=image.shape[-2:]
)
return path, image, bounding_boxes, masks, labels
########################################################################################################################
# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation
# masks. Thus, it offers native support for many Computer Vision tasks, like image and video classification, object
# detection or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
import torchvision.transforms.v2 as transforms
transform = transforms.Compose(
[
transforms.ColorJitter(contrast=0.5),
transforms.RandomRotation(30),
transforms.CenterCrop(480),
]
)
########################################################################################################################
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.
path, image, bounding_boxes, masks, labels = load_data()
torch.manual_seed(0)
new_image = transform(image) # Image Classification
new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection
new_image, new_bounding_boxes, new_masks, new_labels = transform(
image, bounding_boxes, masks, labels
) # Instance Segmentation
new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure
########################################################################################################################
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_plot_datapoints.py`. Note however, that as
# regular user, you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`.
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing to store extra
# information directly with the sample:
sample = {"path": path, "image": image}
new_sample = transform(sample)
assert new_sample["path"] is sample["path"]
########################################################################################################################
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as image or video if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through.
plain_tensor_image = torch.rand(image.shape)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor together with an explicit image, will not transform the former
plain_tensor_image, image = transform(plain_tensor_image, image)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor without an explicit image, will transform the former
plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes)
print(image.shape, plain_tensor_image.shape)
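########################################################################################################################
# A small aside (not part of the original example): if a plain tensor should always be treated as an image, it can be
# wrapped in a :class:`torchvision.datapoints.Image` up front, so the heuristic above no longer applies to it.
from torchvision import datapoints

wrapped_image = datapoints.Image(plain_tensor_image)
wrapped_image, _ = transform(wrapped_image, bounding_boxes)
print(wrapped_image.shape)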
|
"""
==================================
Getting started with transforms v2
==================================
Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports
images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This
example showcases the core functionality of the new ``torchvision.transforms.v2`` API.
"""
import pathlib
import torch
import torchvision
def load_data():
from torchvision.io import read_image
from torchvision import datapoints
from torchvision.ops import masks_to_boxes
assets_directory = pathlib.Path("assets")
path = assets_directory / "FudanPed00054.png"
image = datapoints.Image(read_image(str(path)))
merged_masks = read_image(str(assets_directory / "FudanPed00054_mask.png"))
labels = torch.unique(merged_masks)[1:]
masks = datapoints.Mask(merged_masks == labels.view(-1, 1, 1))
bounding_boxes = datapoints.BoundingBoxes(
masks_to_boxes(masks), format=datapoints.BoundingBoxFormat.XYXY, spatial_size=image.shape[-2:]
)
return path, image, bounding_boxes, masks, labels
########################################################################################################################
# The :mod:`torchvision.transforms.v2` API supports images, videos, bounding boxes, and instance and segmentation
# masks. Thus, it offers native support for many Computer Vision tasks, like image and video classification, object
# detection or instance and semantic segmentation. Still, the interface is the same, making
# :mod:`torchvision.transforms.v2` a drop-in replacement for the existing :mod:`torchvision.transforms` API, aka v1.
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
import torchvision.transforms.v2 as transforms
transform = transforms.Compose(
[
transforms.ColorJitter(contrast=0.5),
transforms.RandomRotation(30),
transforms.CenterCrop(480),
]
)
########################################################################################################################
# :mod:`torchvision.transforms.v2` natively supports jointly transforming multiple inputs while making sure that
# potential random behavior is consistent across all inputs. However, it doesn't enforce a specific input structure or
# order.
path, image, bounding_boxes, masks, labels = load_data()
torch.manual_seed(0)
new_image = transform(image) # Image Classification
new_image, new_bounding_boxes, new_labels = transform(image, bounding_boxes, labels) # Object Detection
new_image, new_bounding_boxes, new_masks, new_labels = transform(
image, bounding_boxes, masks, labels
) # Instance Segmentation
new_image, new_target = transform((image, {"boxes": bounding_boxes, "labels": labels})) # Arbitrary Structure
########################################################################################################################
# Under the hood, :mod:`torchvision.transforms.v2` relies on :mod:`torchvision.datapoints` for the dispatch to the
# appropriate function for the input data: :ref:`sphx_glr_auto_examples_plot_datapoints.py`. Note however, that as
# regular user, you likely don't have to touch this yourself. See
# :ref:`sphx_glr_auto_examples_plot_transforms_v2_e2e.py`.
#
# All "foreign" types like :class:`str`'s or :class:`pathlib.Path`'s are passed through, allowing to store extra
# information directly with the sample:
sample = {"path": path, "image": image}
new_sample = transform(sample)
assert new_sample["path"] is sample["path"]
########################################################################################################################
# As stated above, :mod:`torchvision.transforms.v2` is a drop-in replacement for :mod:`torchvision.transforms` and thus
# also supports transforming plain :class:`torch.Tensor`'s as image or video if applicable. This is achieved with a
# simple heuristic:
#
# * If we find an explicit image or video (:class:`torchvision.datapoints.Image`, :class:`torchvision.datapoints.Video`,
# or :class:`PIL.Image.Image`) in the input, all other plain tensors are passed through.
# * If there is no explicit image or video, only the first plain :class:`torch.Tensor` will be transformed as image or
# video, while all others will be passed through.
plain_tensor_image = torch.rand(image.shape)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor together with an explicit image, will not transform the former
plain_tensor_image, image = transform(plain_tensor_image, image)
print(image.shape, plain_tensor_image.shape)
# passing a plain tensor without an explicit image, will transform the former
plain_tensor_image, _ = transform(plain_tensor_image, bounding_boxes)
print(image.shape, plain_tensor_image.shape)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseBinaryClassificationEvaluator(BinaryClassificationEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
labels: list[int],
name: str = "",
batch_size: int = 32,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
):
super().__init__(
sentences1=sentences1,
sentences2=sentences2,
labels=labels,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def compute_metrices(self, model: SparseEncoder) -> dict[str, dict[str, float]]:
return super().compute_metrices(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseBinaryClassificationEvaluator(BinaryClassificationEvaluator):
    def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
    ) -> dict[str, float]:
        return super().__call__(model, output_path, epoch, steps)
    def compute_metrices(self, model: SparseEncoder) -> dict[str, dict[str, float]]:
        return super().compute_metrices(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
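# --- Illustrative usage sketch (not part of the original file) ---
# The constructor is inherited from BinaryClassificationEvaluator; the sentence pairs
# and labels below are made up, and ``model`` is assumed to be an already-loaded
# SparseEncoder instance.
#
#     evaluator = SparseBinaryClassificationEvaluator(
#         sentences1=["A man is eating food.", "A plane is taking off."],
#         sentences2=["A man is eating a meal.", "A dog is barking."],
#         labels=[1, 0],
#         name="demo-dev",
#     )
#     results = evaluator(model)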
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Semantic Segmentation
<https://arxiv.org/pdf/2107.06278>`_."""
def __init__(self,
backbone,
neck=None,
panoptic_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None):
super(SingleStageDetector, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
panoptic_head.update(train_cfg=train_cfg)
panoptic_head.update(test_cfg=test_cfg)
self.panoptic_head = build_head(panoptic_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def forward_dummy(self, img, img_metas):
"""Used for computing network flops. See
`mmdetection/tools/analysis_tools/get_flops.py`
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
outs = self.panoptic_head(x, img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_masks,
gt_semantic_seg,
gt_bboxes_ignore=None,
                      **kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box.
gt_masks (list[BitmapMasks]): true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_semantic_seg (list[tensor]): semantic segmentation mask for
images.
gt_bboxes_ignore (list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Defaults to None.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# add batch_input_shape in img_metas
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_masks,
gt_semantic_seg,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, **kwargs):
"""Test without augmentation."""
feat = self.extract_feat(img)
mask_results = self.panoptic_head.simple_test(feat, img_metas,
**kwargs)
results = []
for mask in mask_results:
result = {'pan_results': mask.detach().cpu().numpy()}
results.append(result)
return results
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError
def onnx_export(self, img, img_metas):
raise NotImplementedError
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Semantic Segmentation
<https://arxiv.org/pdf/2107.06278>`_"""
def __init__(self,
backbone,
neck=None,
panoptic_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None):
super(SingleStageDetector, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
panoptic_head.update(train_cfg=train_cfg)
panoptic_head.update(test_cfg=test_cfg)
self.panoptic_head = build_head(panoptic_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def forward_dummy(self, img, img_metas):
"""Used for computing network flops. See
`mmdetection/tools/analysis_tools/get_flops.py`
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
outs = self.panoptic_head(x, img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_masks,
gt_semantic_seg,
gt_bboxes_ignore=None,
                      **kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box.
gt_masks (list[BitmapMasks]): true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_semantic_seg (list[tensor]): semantic segmentation mask for
images.
gt_bboxes_ignore (list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Defaults to None.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# add batch_input_shape in img_metas
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_masks,
gt_semantic_seg,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, **kwargs):
"""Test without augmentation."""
feat = self.extract_feat(img)
mask_results = self.panoptic_head.simple_test(feat, img_metas,
**kwargs)
results = []
for mask in mask_results:
result = {'pan_results': mask.detach().cpu().numpy()}
results.append(result)
return results
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError
def onnx_export(self, img, img_metas):
raise NotImplementedError
|
import random
import time
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
@pytest.mark.parametrize('stream', [True, False])
@pytest.mark.parametrize('protocol', ['grpc'])
def test_return_order_in_client(protocol, stream):
class ExecutorRandomSleepExecutor(Executor):
@requests
def foo(self, *args, **kwargs):
rand_sleep = random.uniform(0.1, 1.3)
time.sleep(rand_sleep)
f = Flow(protocol=protocol).add(uses=ExecutorRandomSleepExecutor, replicas=2)
input_text = [f'ordinal-{i}' for i in range(180)]
input_da = DocumentArray([Document(text=t) for t in input_text])
with f:
for _ in range(5):
result_flow = f.post(
'/',
inputs=input_da,
request_size=10,
results_in_order=True,
stream=stream,
)
for input, output in zip(input_da, result_flow):
assert input.text == output.text
c = Client(port=f.port, protocol=str(f.protocol))
for _ in range(5):
result_client = c.post(
'/',
inputs=input_da,
request_size=10,
results_in_order=True,
stream=stream,
)
for input, output in zip(input_da, result_client):
assert input.text == output.text
|
import random
import time
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
@pytest.mark.parametrize('stream', [True, False])
@pytest.mark.parametrize('protocol', ['grpc'])
def test_return_order_in_client(protocol, stream):
class ExecutorRandomSleepExecutor(Executor):
@requests
def foo(self, *args, **kwargs):
rand_sleep = random.uniform(0.1, 1.3)
time.sleep(rand_sleep)
f = Flow(protocol=protocol).add(uses=ExecutorRandomSleepExecutor, replicas=2)
input_text = [f'ordinal-{i}' for i in range(180)]
input_da = DocumentArray([Document(text=t) for t in input_text])
with f:
for _ in range(5):
result_flow = f.post(
'/', inputs=input_da, request_size=10, results_in_order=True, stream=stream
)
for input, output in zip(input_da, result_flow):
assert input.text == output.text
c = Client(port=f.port, protocol=str(f.protocol))
for _ in range(5):
result_client = c.post(
'/', inputs=input_da, request_size=10, results_in_order=True, stream=stream
)
for input, output in zip(input_da, result_client):
assert input.text == output.text
|
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_circles, make_classification, make_moons
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
names = [
"Nearest Neighbors",
"Linear SVM",
"RBF SVM",
"Gaussian Process",
"Decision Tree",
"Random Forest",
"Neural Net",
"AdaBoost",
"Naive Bayes",
"QDA",
]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025, random_state=42),
SVC(gamma=2, C=1, random_state=42),
GaussianProcessClassifier(1.0 * RBF(1.0), random_state=42),
DecisionTreeClassifier(max_depth=5, random_state=42),
RandomForestClassifier(
max_depth=5, n_estimators=10, max_features=1, random_state=42
),
MLPClassifier(alpha=1, max_iter=1000, random_state=42),
AdaBoostClassifier(random_state=42),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
]
X, y = make_classification(
n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1
)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [
make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable,
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=42
)
x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(["#FF0000", "#0000FF"])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k")
# Plot the testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors="k"
)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf = make_pipeline(StandardScaler(), clf)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
DecisionBoundaryDisplay.from_estimator(
clf, X, cmap=cm, alpha=0.8, ax=ax, eps=0.5
)
# Plot the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k"
)
# Plot the testing points
ax.scatter(
X_test[:, 0],
X_test[:, 1],
c=y_test,
cmap=cm_bright,
edgecolors="k",
alpha=0.6,
)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(
x_max - 0.3,
y_min + 0.3,
("%.2f" % score).lstrip("0"),
size=15,
horizontalalignment="right",
)
i += 1
plt.tight_layout()
plt.show()
|
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_circles, make_classification, make_moons
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
names = [
"Nearest Neighbors",
"Linear SVM",
"RBF SVM",
"Gaussian Process",
"Decision Tree",
"Random Forest",
"Neural Net",
"AdaBoost",
"Naive Bayes",
"QDA",
]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025, random_state=42),
SVC(gamma=2, C=1, random_state=42),
GaussianProcessClassifier(1.0 * RBF(1.0), random_state=42),
DecisionTreeClassifier(max_depth=5, random_state=42),
RandomForestClassifier(
max_depth=5, n_estimators=10, max_features=1, random_state=42
),
MLPClassifier(alpha=1, max_iter=1000, random_state=42),
AdaBoostClassifier(algorithm="SAMME", random_state=42),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
]
X, y = make_classification(
n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1
)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [
make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable,
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=42
)
x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(["#FF0000", "#0000FF"])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k")
# Plot the testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors="k"
)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf = make_pipeline(StandardScaler(), clf)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
DecisionBoundaryDisplay.from_estimator(
clf, X, cmap=cm, alpha=0.8, ax=ax, eps=0.5
)
# Plot the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k"
)
# Plot the testing points
ax.scatter(
X_test[:, 0],
X_test[:, 1],
c=y_test,
cmap=cm_bright,
edgecolors="k",
alpha=0.6,
)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(
x_max - 0.3,
y_min + 0.3,
("%.2f" % score).lstrip("0"),
size=15,
horizontalalignment="right",
)
i += 1
plt.tight_layout()
plt.show()
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
    Bytes that store an audio file and that can be loaded into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[np.ndarray, int]:
"""
Load the Audio from the bytes into a numpy.ndarray Audio tensor
EXAMPLE USAGE
.. code-block:: python
            from typing import Optional
            from docarray import BaseDocument
            import numpy as np
            from docarray.typing import AudioUrl, NdArray
            class MyAudio(BaseDocument):
                url: AudioUrl
                tensor: Optional[NdArray]
                bytes: Optional[bytes]
                frame_rate: Optional[int]
            doc = MyAudio(url="toydata/hello.wav")
            doc.bytes = doc.url.load_bytes()
            doc.tensor, doc.frame_rate = doc.bytes.load()
            # Note this is equivalent to doing
            doc.tensor, doc.frame_rate = doc.url.load()
            assert isinstance(doc.tensor, np.ndarray)
        :return: tuple of an np.ndarray of audio samples and the integer frame rate
"""
from pydub import AudioSegment # type: ignore
segment = AudioSegment.from_file(io.BytesIO(self))
# Convert to float32 using NumPy
samples = np.array(segment.get_array_of_samples())
# Normalise float32 array so that values are between -1.0 and +1.0
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return samples_norm, segment.frame_rate
|
import io
import wave
from typing import TYPE_CHECKING, Any, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import MAX_INT_16
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
    Bytes that store an audio file and that can be loaded into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> np.ndarray:
"""
Load the Audio from the bytes into a numpy.ndarray Audio tensor
EXAMPLE USAGE
.. code-block:: python
            from typing import Optional
            from docarray import BaseDocument
            import numpy as np
            from docarray.typing import AudioUrl, NdArray
            class MyAudio(BaseDocument):
                url: AudioUrl
                tensor: Optional[NdArray]
                bytes: Optional[bytes]
            doc = MyAudio(url="toydata/hello.wav")
            doc.bytes = doc.url.load_bytes()
            doc.tensor = doc.bytes.load()
            # Note this is equivalent to doing
            doc.tensor = doc.url.load()
            assert isinstance(doc.tensor, np.ndarray)
        :return: np.ndarray of audio samples normalised to the range [-1.0, 1.0]
"""
        # note: wave is a Python built-in module, see https://docs.python.org/3/library/wave.html
with wave.open(io.BytesIO(self)) as ifile:
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype('float32')
# Normalise float32 array so that values are between -1.0 and +1.0
audio_norm = audio_as_np_float32 / MAX_INT_16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty((int(len(audio_norm) / channels), channels))
audio_stereo[:, 0] = audio_norm[range(0, len(audio_norm), 2)]
audio_stereo[:, 1] = audio_norm[range(1, len(audio_norm), 2)]
return audio_stereo
else:
return audio_norm
|
"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Optional
from langchain_core.callbacks import StreamingStdOutCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
"""Callback handler for streaming in agents.
Only works with agents using LLMs that support streaming.
Only the final output of the agent will be streamed.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
else:
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[list[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate FinalStreamingStdOutCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
            strip_tokens: Whether to ignore white space and newlines when comparing
                answer_prefix_tokens to the last tokens (to determine whether the
                answer has been reached).
            stream_prefix: Whether the answer prefix itself should also be streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
"""Run when LLM starts running."""
self.answer_reached = False
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
sys.stdout.write(t)
sys.stdout.flush()
return
# ... if yes, then print tokens from now on
if self.answer_reached:
sys.stdout.write(token)
sys.stdout.flush()
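# A minimal usage sketch (commented out because it needs a configured model;
# `langchain_openai` and its `OpenAI` class are assumptions, not part of this
# module): the handler is passed via `callbacks` to the streaming LLM that backs
# an agent, so only the tokens after the "Final Answer:" prefix reach stdout.
#
# from langchain_openai import OpenAI
# handler = FinalStreamingStdOutCallbackHandler()
# llm = OpenAI(streaming=True, callbacks=[handler])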
|
"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import StreamingStdOutCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
"""Callback handler for streaming in agents.
Only works with agents using LLMs that support streaming.
Only the final output of the agent will be streamed.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
else:
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[List[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate FinalStreamingStdOutCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
            strip_tokens: Whether to ignore white space and newlines when comparing
                answer_prefix_tokens to the last tokens (to determine whether the
                answer has been reached).
            stream_prefix: Whether the answer prefix itself should also be streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts running."""
self.answer_reached = False
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
sys.stdout.write(t)
sys.stdout.flush()
return
# ... if yes, then print tokens from now on
if self.answer_reached:
sys.stdout.write(token)
sys.stdout.flush()
|
from jina.clients.base.websocket import WebSocketBaseClient
from jina.clients.mixin import (
AsyncHealthCheckMixin,
AsyncPostMixin,
HealthCheckMixin,
PostMixin,
)
class WebSocketClient(WebSocketBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='websocket', asyncio=False, host='ws://my.awesome.flow:1234'
) # returns WebSocketClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncWebSocketClient(WebSocketBaseClient, AsyncPostMixin, AsyncHealthCheckMixin):
"""
Asynchronous client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
    Unlike :class:`WebSocketClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
    simply calling it will not schedule it for execution.
    To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
    ``asyncio.create_task()``.
    :class:`AsyncWebSocketClient` can be very useful in
    integration settings, where Jina/Flow/Client is NOT the main logic, but rather serves as part of another program.
    In this case, users often do not want to let Jina control the ``asyncio`` event loop. In contrast, :class:`Client`
    controls and wraps the event loop internally, making the Client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
        import asyncio
        from jina import Client
        from docarray import Document
        # async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='websocket', asyncio=True, host='http://ws.awesome.flow:1234'
) # returns AsyncWebSocketClient instance
        async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
from jina.clients.base.websocket import WebSocketBaseClient
from jina.clients.mixin import AsyncPostMixin, HealthCheckMixin, PostMixin
class WebSocketClient(WebSocketBaseClient, PostMixin, HealthCheckMixin):
"""A client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
EXAMPLE USAGE
.. code-block:: python
from jina import Client
from docarray import Document
# select host address to connect to
c = Client(
protocol='websocket', asyncio=False, host='ws://my.awesome.flow:1234'
) # returns WebSocketClient instance
c.post(on='/index', inputs=Document(text='hello!'))
"""
class AsyncWebSocketClient(WebSocketBaseClient, AsyncPostMixin, HealthCheckMixin):
"""
Asynchronous client connecting to a Gateway using WebSocket protocol.
Instantiate this class through the :meth:`jina.Client` convenience method.
    Unlike :class:`WebSocketClient`, here :meth:`post` is a coroutine (i.e. declared with the async/await syntax);
    simply calling it will not schedule it for execution.
    To actually run a coroutine, users need to put it in an event loop, e.g. via ``asyncio.run()`` or
    ``asyncio.create_task()``.
    :class:`AsyncWebSocketClient` can be very useful in
    integration settings, where Jina/Flow/Client is NOT the main logic, but rather serves as part of another program.
    In this case, users often do not want to let Jina control the ``asyncio`` event loop. In contrast, :class:`Client`
    controls and wraps the event loop internally, making the Client look synchronous from the outside.
EXAMPLE USAGE
.. code-block:: python
        import asyncio
        from jina import Client
        from docarray import Document
        # async inputs for the client
async def async_inputs():
for _ in range(10):
yield Document()
await asyncio.sleep(0.1)
# select host address to connect to
c = Client(
protocol='websocket', asyncio=True, host='http://ws.awesome.flow:1234'
) # returns AsyncWebSocketClient instance
        async for resp in c.post(on='/index', inputs=async_inputs, request_size=1):
print(resp)
"""
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_mask=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=None,
mask_head=dict(
type='FCNMaskHead',
num_convs=0,
in_channels=2048,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=14,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
nms=dict(type='nms', iou_threshold=0.7),
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='MaskRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
shared_head=dict(
type='ResLayer',
depth=50,
stage=3,
stride=2,
dilation=1,
style='caffe',
norm_cfg=norm_cfg,
norm_eval=True),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=1024,
featmap_strides=[16]),
bbox_head=dict(
type='BBoxHead',
with_avg_pool=True,
roi_feat_size=7,
in_channels=2048,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=None,
mask_head=dict(
type='FCNMaskHead',
num_convs=0,
in_channels=2048,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=14,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
nms=dict(type='nms', iou_threshold=0.7),
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
PALETTE = [(0, 255, 0)]
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.15.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.15.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
from unittest import mock
import pytest
from llama_index.core.workflow import Context
from llama_index.core.workflow.handler import WorkflowHandler
def test_str():
h = WorkflowHandler()
h.set_result([])
assert str(h) == "[]"
@pytest.mark.asyncio()
async def test_stream_no_context():
h = WorkflowHandler()
with pytest.raises(ValueError, match="Context is not set!"):
async for ev in h.stream_events():
pass
@pytest.mark.asyncio()
async def test_run_step_no_context():
h = WorkflowHandler()
with pytest.raises(
ValueError,
match="Context must be set to run a workflow step-wise!",
):
await h.run_step()
@pytest.mark.asyncio()
async def test_run_step_no_stepwise():
ctx = mock.MagicMock(spec=Context, stepwise=False)
h = WorkflowHandler(ctx=ctx)
with pytest.raises(
ValueError,
match="Workflow must be created passing stepwise=True to call this method.",
):
await h.run_step()
|
from unittest import mock
import pytest
from llama_index.core.workflow import Context
from llama_index.core.workflow.handler import WorkflowHandler
def test_str():
h = WorkflowHandler()
h.set_result([])
assert str(h) == "[]"
@pytest.mark.asyncio()
async def test_stream_no_context():
h = WorkflowHandler()
h.ctx = None
with pytest.raises(ValueError, match="Context is not set!"):
async for ev in h.stream_events():
pass
@pytest.mark.asyncio()
async def test_run_step_no_context():
h = WorkflowHandler()
h.ctx = None
with pytest.raises(
ValueError,
match="Context must be set to run a workflow step-wise!",
):
await h.run_step()
@pytest.mark.asyncio()
async def test_run_step_no_stepwise():
ctx = mock.MagicMock(spec=Context, stepwise=False)
h = WorkflowHandler(ctx=ctx)
with pytest.raises(
ValueError,
match="Workflow must be created passing stepwise=True to call this method.",
):
await h.run_step()
|
from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal._typing import is_tensor_union, is_type_tensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
from docarray.typing import TensorFlowTensor
else:
TensorFlowTensor = None
@pytest.mark.parametrize(
'type_, is_tensor',
[
(int, False),
(TorchTensor, True),
(NdArray, True),
(AbstractTensor, True),
(Optional[TorchTensor], False),
(Union[TorchTensor, NdArray], False),
(None, False),
(Dict, False),
],
)
def test_is_type_tensor(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_tensor',
[
(TensorFlowTensor, True),
(Optional[TensorFlowTensor], False),
],
)
def test_is_type_tensor_with_tf(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(int, False),
(TorchTensor, False),
(NdArray, False),
(Optional[TorchTensor], True),
(Optional[NdArray], True),
(Union[NdArray, TorchTensor], True),
(Union[NdArray, TorchTensor, AbstractTensor], True),
(Union[NdArray, TorchTensor, Optional[TorchTensor]], True),
(Union[NdArray, TorchTensor, None], True),
],
)
def test_is_union_type_tensor(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(TensorFlowTensor, False),
(Optional[TensorFlowTensor], True),
(Union[NdArray, TorchTensor, TensorFlowTensor], True),
(Union[NdArray, TorchTensor, Optional[TensorFlowTensor]], True),
],
)
def test_is_union_type_tensor_with_tf(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
|
from typing import Dict, Optional, Union
import pytest
from docarray.typing import NdArray, TorchTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._typing import is_tensor_union, is_type_tensor
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
from docarray.typing import TensorFlowTensor
else:
TensorFlowTensor = None
@pytest.mark.parametrize(
'type_, is_tensor',
[
(int, False),
(TorchTensor, True),
(NdArray, True),
(AbstractTensor, True),
(Optional[TorchTensor], False),
(Union[TorchTensor, NdArray], False),
(None, False),
(Dict, False),
],
)
def test_is_type_tensor(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_tensor',
[
(TensorFlowTensor, True),
(Optional[TensorFlowTensor], False),
],
)
def test_is_type_tensor_with_tf(type_, is_tensor):
assert is_type_tensor(type_) == is_tensor
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(int, False),
(TorchTensor, False),
(NdArray, False),
(Optional[TorchTensor], True),
(Optional[NdArray], True),
(Union[NdArray, TorchTensor], True),
(Union[NdArray, TorchTensor, AbstractTensor], True),
(Union[NdArray, TorchTensor, Optional[TorchTensor]], True),
(Union[NdArray, TorchTensor, None], True),
],
)
def test_is_union_type_tensor(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'type_, is_union_tensor',
[
(TensorFlowTensor, False),
(Optional[TensorFlowTensor], True),
(Union[NdArray, TorchTensor, TensorFlowTensor], True),
(Union[NdArray, TorchTensor, Optional[TensorFlowTensor]], True),
],
)
def test_is_union_type_tensor_with_tf(type_, is_union_tensor):
assert is_tensor_union(type_) == is_union_tensor
|
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING: # pragma: no cover
from docarray.document.pydantic_model import PydanticDocumentArray
from docarray.typing import T
from pydantic import BaseModel
class PydanticMixin:
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
"""Return a JSON Schema of DocumentArray class."""
from pydantic import schema_json_of
from docarray.document.pydantic_model import PydanticDocumentArray
return schema_json_of(
PydanticDocumentArray, title='DocumentArray Schema', indent=indent
)
def to_pydantic_model(self) -> 'PydanticDocumentArray':
"""Convert a DocumentArray object into a Pydantic model."""
return [d.to_pydantic_model() for d in self]
@classmethod
def from_pydantic_model(cls: Type['T'], model: List['BaseModel']) -> 'T':
"""Convert a list of PydanticDocument into DocumentArray
:param model: the list of pydantic data model objects that represents a DocumentArray
:return: a DocumentArray
"""
from docarray import Document
return cls(Document.from_pydantic_model(m) for m in model)
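# A minimal usage sketch (assumes docarray's DocumentArray, which includes this
# mixin; the document text is arbitrary example data):
if __name__ == "__main__":
    from docarray import Document, DocumentArray
    da = DocumentArray([Document(text='hello')])
    models = da.to_pydantic_model()
    da_roundtrip = DocumentArray.from_pydantic_model(models)
    assert da_roundtrip[0].text == 'hello'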
|
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.document.pydantic_model import PydanticDocumentArray
from docarray.typing import T
from pydantic import BaseModel
class PydanticMixin:
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
"""Return a JSON Schema of DocumentArray class."""
from pydantic import schema_json_of
from docarray.document.pydantic_model import PydanticDocumentArray
return schema_json_of(
PydanticDocumentArray, title='DocumentArray Schema', indent=indent
)
def to_pydantic_model(self) -> 'PydanticDocumentArray':
"""Convert a DocumentArray object into a Pydantic model."""
return [d.to_pydantic_model() for d in self]
@classmethod
def from_pydantic_model(cls: Type['T'], model: List['BaseModel']) -> 'T':
"""Convert a list of PydanticDocument into DocumentArray
:param model: the list of pydantic data model objects that represents a DocumentArray
:return: a DocumentArray
"""
from docarray import Document
return cls(Document.from_pydantic_model(m) for m in model)
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
import sys
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer(model_name)
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="cnn", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-cnn")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-cnn')`."
)
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
import sys
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer(model_name)
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="cnn", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-cnn")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-cnn')`."
)
|
"""JSON node parser."""
import json
from typing import Any, Dict, Generator, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
class JSONNodeParser(NodeParser):
"""
JSON node parser.
Splits a document into Nodes using custom JSON splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "JSONNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "JSONNodeParser"
def _parse_nodes(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
try:
data = json.loads(text)
except json.JSONDecodeError:
# Handle invalid JSON input here
return []
json_nodes = []
if isinstance(data, dict):
lines = [*self._depth_first_yield(data, 0, [])]
json_nodes.extend(
build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func)
)
elif isinstance(data, list):
for json_object in data:
lines = [*self._depth_first_yield(json_object, 0, [])]
json_nodes.extend(
build_nodes_from_splits(
["\n".join(lines)], node, id_func=self.id_func
)
)
else:
raise ValueError("JSON is invalid")
return json_nodes
def _depth_first_yield(
self, json_data: Dict, levels_back: int, path: List[str]
) -> Generator[str, None, None]:
"""
Do depth first yield of all of the leaf nodes of a JSON.
Combines keys in the JSON tree using spaces.
If levels_back is set to 0, prints all levels.
"""
if isinstance(json_data, dict):
for key, value in json_data.items():
new_path = path[:]
new_path.append(key)
yield from self._depth_first_yield(value, levels_back, new_path)
elif isinstance(json_data, list):
for _, value in enumerate(json_data):
yield from self._depth_first_yield(value, levels_back, path)
else:
new_path = path[-levels_back:]
new_path.append(str(json_data))
yield " ".join(new_path)
|
"""JSON node parser."""
import json
from typing import Any, Dict, Generator, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
class JSONNodeParser(NodeParser):
"""
JSON node parser.
Splits a document into Nodes using custom JSON splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "JSONNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "JSONNodeParser"
def _parse_nodes(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
try:
data = json.loads(text)
except json.JSONDecodeError:
# Handle invalid JSON input here
return []
json_nodes = []
if isinstance(data, dict):
lines = [*self._depth_first_yield(data, 0, [])]
json_nodes.extend(
build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func)
)
elif isinstance(data, list):
for json_object in data:
lines = [*self._depth_first_yield(json_object, 0, [])]
json_nodes.extend(
build_nodes_from_splits(
["\n".join(lines)], node, id_func=self.id_func
)
)
else:
raise ValueError("JSON is invalid")
return json_nodes
def _depth_first_yield(
self, json_data: Dict, levels_back: int, path: List[str]
) -> Generator[str, None, None]:
"""
Do depth first yield of all of the leaf nodes of a JSON.
Combines keys in the JSON tree using spaces.
If levels_back is set to 0, prints all levels.
"""
if isinstance(json_data, dict):
for key, value in json_data.items():
new_path = path[:]
new_path.append(key)
yield from self._depth_first_yield(value, levels_back, new_path)
elif isinstance(json_data, list):
for _, value in enumerate(json_data):
yield from self._depth_first_yield(value, levels_back, path)
else:
new_path = path[-levels_back:]
new_path.append(str(json_data))
yield " ".join(new_path)
|
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
backend_device_count,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_accelerator,
torch_device,
)
def gather_from_all_gpus(tensor, world_size):
# Prepare a list to gather tensors from all processes
gather_list = [torch.zeros_like(tensor) for _ in range(world_size)]
dist.all_gather(gather_list, tensor)
return gather_list # List of tensors from all ranks
class DummyDataset(Dataset):
def __init__(self):
self.length = 64
def __len__(self):
return self.length
    def __getitem__(self, i) -> dict:
x = random.random()
y = np.random.random()
z = torch.rand([]).item()
return {"x": torch.tensor([x, y, z])}
class DummyModel(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(3, 1)
def forward(self, x):
local_tensor = torch.tensor(x, device=torch_device)
gathered = gather_from_all_gpus(local_tensor, dist.get_world_size())
assert not all(torch.allclose(t, gathered[0]) for t in gathered[1:])
y = self.fc(x)
return (y.mean(), y)
class TestTrainerDistributedWorkerSeed(TestCasePlus):
@require_torch_multi_accelerator
def test_trainer(self):
device_count = backend_device_count(torch_device)
output_dir = self.get_auto_remove_tmp_dir()
distributed_args = f"""--nproc_per_node={device_count}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed_worker_seed.py
""".split()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
def run_distributed_training(training_args):
set_seed(42)
model = DummyModel()
dataset = DummyDataset()
training_args.max_steps = 10
# dataloader_num_workers must be > 0 to enable worker_init_fn
training_args.dataloader_num_workers = 2
trainer = Trainer(
model,
training_args,
train_dataset=dataset,
)
trainer.train()
if __name__ == "__main__":
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
run_distributed_training(training_args)
|
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.utils.data import Dataset
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
)
def gather_from_all_gpus(tensor, world_size):
# Prepare a list to gather tensors from all processes
gather_list = [torch.zeros_like(tensor) for _ in range(world_size)]
dist.all_gather(gather_list, tensor)
return gather_list # List of tensors from all ranks
class DummyDataset(Dataset):
def __init__(self):
self.length = 64
def __len__(self):
return self.length
    def __getitem__(self, i) -> dict:
x = random.random()
y = np.random.random()
z = torch.rand([]).item()
return {"x": torch.tensor([x, y, z])}
class DummyModel(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(3, 1)
def forward(self, x):
local_tensor = torch.tensor(x, device="cuda")
gathered = gather_from_all_gpus(local_tensor, dist.get_world_size())
assert not all(torch.allclose(t, gathered[0]) for t in gathered[1:])
y = self.fc(x)
return (y.mean(), y)
class TestTrainerDistributedWorkerSeed(TestCasePlus):
@require_torch_multi_gpu
def test_trainer(self):
device_count = torch.cuda.device_count()
output_dir = self.get_auto_remove_tmp_dir()
distributed_args = f"""--nproc_per_node={device_count}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed_worker_seed.py
""".split()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
def run_distributed_training(training_args):
set_seed(42)
model = DummyModel()
dataset = DummyDataset()
training_args.max_steps = 10
# dataloader_num_workers must be > 0 to enable worker_init_fn
training_args.dataloader_num_workers = 2
trainer = Trainer(
model,
training_args,
train_dataset=dataset,
)
trainer.train()
if __name__ == "__main__":
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
run_distributed_training(training_args)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform, extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
SAMPLE_RATE = 8000
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""Create *QUESST14* :cite:`Mir2015QUESST2014EQ` Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
if subset not in ["docs", "dev", "eval"]:
raise ValueError("`subset` must be one of ['docs', 'dev', 'eval']")
if language is not None and language not in _LANGUAGES:
raise ValueError(f"`language` must be None or one of {str(_LANGUAGES)}")
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def get_metadata(self, n: int) -> Tuple[str, int, str]:
"""Get metadata for the n-th sample from the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): The index of the sample to be loaded
Returns:
(str, int, str):
``(filepath, sample_rate, file_name)``
"""
audio_path = self.data[n]
relpath = os.path.relpath(audio_path, self._path)
return relpath, SAMPLE_RATE, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, file_name)``
"""
metadata = self.get_metadata(n)
waveform = _load_waveform(self._path, metadata[0], metadata[1])
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
    language: Optional[str],
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
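if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): load the "docs" subset for
    # nnenglish and inspect the first utterance. The root path below is a placeholder.
    dataset = QUESST14("/path/to/datasets", subset="docs", language="nnenglish", download=False)
    waveform, sample_rate, file_name = dataset[0]
    print(file_name, sample_rate, tuple(waveform.shape))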
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""Create *QUESST14* :cite:`Mir2015QUESST2014EQ` Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
if subset not in ["docs", "dev", "eval"]:
raise ValueError("`subset` must be one of ['docs', 'dev', 'eval']")
if language is not None and language not in _LANGUAGES:
raise ValueError(f"`language` must be None or one of {str(_LANGUAGES)}")
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def _load_sample(self, n: int) -> Tuple[torch.Tensor, int, str]:
audio_path = self.data[n]
wav, sample_rate = torchaudio.load(audio_path)
return wav, sample_rate, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, file_name)``
"""
return self._load_sample(n)
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
    language: Optional[str],
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
|
from __future__ import annotations
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
import torch
from torchvision._utils import sequence_to_str
from torchvision.prototype import datapoints
from torchvision.prototype.datapoints._datapoint import Datapoint
from torchvision.prototype.transforms.functional import get_dimensions, get_spatial_size
def is_simple_tensor(inpt: Any) -> bool:
return isinstance(inpt, torch.Tensor) and not isinstance(inpt, Datapoint)
def query_bounding_box(flat_inputs: List[Any]) -> datapoints.BoundingBox:
bounding_boxes = [inpt for inpt in flat_inputs if isinstance(inpt, datapoints.BoundingBox)]
if not bounding_boxes:
raise TypeError("No bounding box was found in the sample")
elif len(bounding_boxes) > 1:
raise ValueError("Found multiple bounding boxes in the sample")
return bounding_boxes.pop()
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if isinstance(inpt, (datapoints.Image, PIL.Image.Image, datapoints.Video)) or is_simple_tensor(inpt)
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_spatial_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_spatial_size(inpt))
for inpt in flat_inputs
if isinstance(
inpt, (datapoints.Image, PIL.Image.Image, datapoints.Video, datapoints.Mask, datapoints.BoundingBox)
)
or is_simple_tensor(inpt)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
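if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): the helpers above operate on a
    # flattened sample, i.e. a plain list of inputs.
    sample = [torch.rand(3, 32, 48), "a caption"]
    print(has_any(sample, PIL.Image.Image, is_simple_tensor))  # True, the plain tensor counts
    print(query_chw(sample))           # (3, 32, 48)
    print(query_spatial_size(sample))  # (32, 48)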
|
from typing import Any, Callable, List, Tuple, Type, Union
import PIL.Image
from torchvision._utils import sequence_to_str
from torchvision.prototype import features
from torchvision.prototype.transforms.functional import get_dimensions, get_spatial_size
def query_bounding_box(flat_inputs: List[Any]) -> features.BoundingBox:
bounding_boxes = [inpt for inpt in flat_inputs if isinstance(inpt, features.BoundingBox)]
if not bounding_boxes:
raise TypeError("No bounding box was found in the sample")
elif len(bounding_boxes) > 1:
raise ValueError("Found multiple bounding boxes in the sample")
return bounding_boxes.pop()
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if isinstance(inpt, (features.Image, PIL.Image.Image, features.Video)) or features.is_simple_tensor(inpt)
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_spatial_size(flat_inputs: List[Any]) -> Tuple[int, int]:
sizes = {
tuple(get_spatial_size(inpt))
for inpt in flat_inputs
if isinstance(inpt, (features.Image, PIL.Image.Image, features.Video, features.Mask, features.BoundingBox))
or features.is_simple_tensor(inpt)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'SparseStaticEmbedding\.*': 1e-3}` for the
SparseStaticEmbedding module. This is useful when you want to fine-tune specific parts of the model
with different learning rates.
"""
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
PREFIX = """You are an agent designed to answer questions about sets of documents.
You have access to tools for interacting with the documents, and the inputs to the tools are questions.
Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.
If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer.
""" # noqa: E501
ROUTER_PREFIX = """You are an agent designed to answer questions.
You have access to tools for interacting with different sources, and the inputs to the tools are questions.
Your main task is to decide which of the tools is relevant for answering the question at hand.
For complex questions, you can break the question down into sub questions and use tools to answer the sub questions.
""" # noqa: E501
|
# flake8: noqa
PREFIX = """You are an agent designed to answer questions about sets of documents.
You have access to tools for interacting with the documents, and the inputs to the tools are questions.
Sometimes, you will be asked to provide sources for your questions, in which case you should use the appropriate tool to do so.
If the question does not seem relevant to any of the tools provided, just return "I don't know" as the answer.
"""
ROUTER_PREFIX = """You are an agent designed to answer questions.
You have access to tools for interacting with different sources, and the inputs to the tools are questions.
Your main task is to decide which of the tools is relevant for answering the question at hand.
For complex questions, you can break the question down into sub questions and use tools to answer the sub questions.
"""
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
# training schedule for 3x
train_cfg = dict(max_epochs=max_epochs)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './paa_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
from typing import Optional
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (
dump_migrations_as_grit,
)
from langchain_cli.namespaces.migrate.generate.partner import (
get_migrations_for_partner_package,
)
@click.group()
def cli() -> None:
"""Migration scripts management."""
@cli.command()
@click.option(
"--pkg1",
default="langchain",
)
@click.option(
"--pkg2",
default="langchain_community",
)
@click.option(
"--output",
default=None,
help="Output file for the migration script.",
)
@click.option(
"--filter-by-all/--no-filter-by-all",
default=True,
help="Output file for the migration script.",
)
@click.option(
"--format",
"format_",
type=click.Choice(["json", "grit"], case_sensitive=False),
default="json",
help="The output format for the migration script (json or grit).",
)
def generic(
pkg1: str,
pkg2: str,
output: str,
filter_by_all: bool, # noqa: FBT001
format_: str,
) -> None:
"""Generate a migration script."""
click.echo("Migration script generated.")
migrations = generate_simplified_migrations(pkg1, pkg2, filter_by_all=filter_by_all)
if output is not None:
name = output.removesuffix(".json").removesuffix(".grit")
else:
name = f"{pkg1}_to_{pkg2}"
if output is None:
output = f"{name}.json" if format_ == "json" else f"{name}.grit"
if format_ == "json":
dumped = json.dumps(migrations, indent=2, sort_keys=True)
else:
dumped = dump_migrations_as_grit(name, migrations)
with open(output, "w") as f:
f.write(dumped)
def handle_partner(pkg: str, output: Optional[str] = None) -> None:
migrations = get_migrations_for_partner_package(pkg)
# Run with python 3.9+
name = pkg.removeprefix("langchain_")
data = dump_migrations_as_grit(name, migrations)
output_name = f"{name}.grit" if output is None else output
if migrations:
with open(output_name, "w") as f:
f.write(data)
click.secho(f"LangChain migration script saved to {output_name}")
else:
click.secho(f"No migrations found for {pkg}", fg="yellow")
@cli.command()
@click.argument("pkg")
@click.option("--output", default=None, help="Output file for the migration script.")
def partner(pkg: str, output: str) -> None:
"""Generate migration scripts specifically for LangChain modules."""
click.echo("Migration script for LangChain generated.")
handle_partner(pkg, output)
@cli.command()
@click.argument("json_file")
def json_to_grit(json_file: str) -> None:
"""Generate a Grit migration from an old JSON migration file."""
with open(json_file) as f:
migrations = json.load(f)
name = os.path.basename(json_file).removesuffix(".json").removesuffix(".grit")
data = dump_migrations_as_grit(name, migrations)
output_name = f"{name}.grit"
with open(output_name, "w") as f:
f.write(data)
click.secho(f"GritQL migration script saved to {output_name}")
@cli.command()
def all_installed_partner_pkgs() -> None:
"""Generate migration scripts for all LangChain modules."""
# Will generate migrations for all partner packages.
# Define as "langchain_<partner_name>".
# First let's determine which packages are installed in the environment
# and then generate migrations for them.
langchain_pkgs = [
name
for _, name, _ in pkgutil.iter_modules()
if name.startswith("langchain_")
and name not in {"langchain_core", "langchain_cli", "langchain_community"}
]
for pkg in langchain_pkgs:
handle_partner(pkg)
if __name__ == "__main__":
cli()
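# Illustrative invocations (assumption: this script is saved as generate_migrations.py):
#   python generate_migrations.py generic --pkg1 langchain --pkg2 langchain_community --format grit
#   python generate_migrations.py partner langchain_openai --output openai.grit
#   python generate_migrations.py all-installed-partner-pkgs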
|
# type: ignore
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
from typing import Optional
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (
dump_migrations_as_grit,
)
from langchain_cli.namespaces.migrate.generate.partner import (
get_migrations_for_partner_package,
)
@click.group()
def cli() -> None:
"""Migration scripts management."""
@cli.command()
@click.option(
"--pkg1",
default="langchain",
)
@click.option(
"--pkg2",
default="langchain_community",
)
@click.option(
"--output",
default=None,
help="Output file for the migration script.",
)
@click.option(
"--filter-by-all/--no-filter-by-all",
default=True,
help="Output file for the migration script.",
)
@click.option(
"--format",
type=click.Choice(["json", "grit"], case_sensitive=False),
default="json",
help="The output format for the migration script (json or grit).",
)
def generic(
pkg1: str, pkg2: str, output: str, filter_by_all: bool, format: str
) -> None:
"""Generate a migration script."""
click.echo("Migration script generated.")
migrations = generate_simplified_migrations(pkg1, pkg2, filter_by_all=filter_by_all)
if output is not None:
name = output.removesuffix(".json").removesuffix(".grit")
else:
name = f"{pkg1}_to_{pkg2}"
if output is None:
output = f"{name}.json" if format == "json" else f"{name}.grit"
if format == "json":
dumped = json.dumps(migrations, indent=2, sort_keys=True)
else:
dumped = dump_migrations_as_grit(name, migrations)
with open(output, "w") as f:
f.write(dumped)
def handle_partner(pkg: str, output: Optional[str] = None) -> None:
migrations = get_migrations_for_partner_package(pkg)
# Run with python 3.9+
name = pkg.removeprefix("langchain_")
data = dump_migrations_as_grit(name, migrations)
output_name = f"{name}.grit" if output is None else output
if migrations:
with open(output_name, "w") as f:
f.write(data)
click.secho(f"LangChain migration script saved to {output_name}")
else:
click.secho(f"No migrations found for {pkg}", fg="yellow")
@cli.command()
@click.argument("pkg")
@click.option("--output", default=None, help="Output file for the migration script.")
def partner(pkg: str, output: str) -> None:
"""Generate migration scripts specifically for LangChain modules."""
click.echo("Migration script for LangChain generated.")
handle_partner(pkg, output)
@cli.command()
@click.argument("json_file")
def json_to_grit(json_file: str) -> None:
"""Generate a Grit migration from an old JSON migration file."""
with open(json_file) as f:
migrations = json.load(f)
name = os.path.basename(json_file).removesuffix(".json").removesuffix(".grit")
data = dump_migrations_as_grit(name, migrations)
output_name = f"{name}.grit"
with open(output_name, "w") as f:
f.write(data)
click.secho(f"GritQL migration script saved to {output_name}")
@cli.command()
def all_installed_partner_pkgs() -> None:
"""Generate migration scripts for all LangChain modules."""
# Will generate migrations for all partner packages.
# Define as "langchain_<partner_name>".
# First let's determine which packages are installed in the environment
# and then generate migrations for them.
langchain_pkgs = [
name
for _, name, _ in pkgutil.iter_modules()
if name.startswith("langchain_")
and name not in {"langchain_core", "langchain_cli", "langchain_community"}
]
for pkg in langchain_pkgs:
handle_partner(pkg)
if __name__ == "__main__":
cli()
|
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union, Any
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from llama_index.core.llms import ChatMessage, LLM
from llama_index.core.memory import BaseMemory
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType, PromptDictType
from llama_index.core.tools import BaseTool, AsyncBaseTool, FunctionTool
from llama_index.core.workflow import Context
from llama_index.core.objects import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.workflow.checkpointer import CheckpointCallback
from llama_index.core.workflow.handler import WorkflowHandler
DEFAULT_AGENT_NAME = "Agent"
DEFAULT_AGENT_DESCRIPTION = "An agent that can perform a task"
def get_default_llm() -> LLM:
return Settings.llm
class BaseWorkflowAgent(BaseModel, PromptMixin, ABC):
"""Base class for all agents, combining config and logic."""
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str = Field(default=DEFAULT_AGENT_NAME, description="The name of the agent")
description: str = Field(
default=DEFAULT_AGENT_DESCRIPTION,
description="The description of what the agent does and is responsible for",
)
system_prompt: Optional[str] = Field(
default=None, description="The system prompt for the agent"
)
tools: Optional[List[Union[BaseTool, Callable]]] = Field(
default=None, description="The tools that the agent can use"
)
tool_retriever: Optional[ObjectRetriever] = Field(
default=None,
description="The tool retriever for the agent, can be provided instead of tools",
)
can_handoff_to: Optional[List[str]] = Field(
default=None, description="The agent names that this agent can hand off to"
)
llm: LLM = Field(
default_factory=get_default_llm, description="The LLM that the agent uses"
)
@field_validator("tools", mode="before")
def validate_tools(
cls, v: Optional[Sequence[Union[BaseTool, Callable]]]
) -> Optional[Sequence[BaseTool]]:
"""Validate tools.
If tools are not of type BaseTool, they will be converted to FunctionTools.
This assumes the inputs are tools or callable functions.
"""
if v is None:
return None
validated_tools: List[BaseTool] = []
for tool in v:
if not isinstance(tool, BaseTool):
validated_tools.append(FunctionTool.from_defaults(tool))
else:
validated_tools.append(tool)
for tool in validated_tools:
if tool.metadata.name == "handoff":
raise ValueError(
"'handoff' is a reserved tool name. Please use a different name."
)
return validated_tools # type: ignore[return-value]
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
@abstractmethod
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the agent."""
@abstractmethod
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results."""
@abstractmethod
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the agent's execution."""
@abstractmethod
def run(
self,
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
ctx: Optional[Context] = None,
stepwise: bool = False,
checkpoint_callback: Optional[CheckpointCallback] = None,
**workflow_kwargs: Any,
) -> WorkflowHandler:
"""Run the agent."""
|
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union, Any
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from llama_index.core.llms import ChatMessage, LLM
from llama_index.core.memory import BaseMemory
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType, PromptDictType
from llama_index.core.tools import BaseTool, AsyncBaseTool, FunctionTool
from llama_index.core.workflow import Context
from llama_index.core.objects import ObjectRetriever
from llama_index.core.settings import Settings
from llama_index.core.workflow.checkpointer import CheckpointCallback
from llama_index.core.workflow.handler import WorkflowHandler
def get_default_llm() -> LLM:
return Settings.llm
class BaseWorkflowAgent(BaseModel, PromptMixin, ABC):
"""Base class for all agents, combining config and logic."""
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str = Field(description="The name of the agent")
description: str = Field(
description="The description of what the agent does and is responsible for"
)
system_prompt: Optional[str] = Field(
default=None, description="The system prompt for the agent"
)
tools: Optional[List[Union[BaseTool, Callable]]] = Field(
default=None, description="The tools that the agent can use"
)
tool_retriever: Optional[ObjectRetriever] = Field(
default=None,
description="The tool retriever for the agent, can be provided instead of tools",
)
can_handoff_to: Optional[List[str]] = Field(
default=None, description="The agent names that this agent can hand off to"
)
llm: LLM = Field(
default_factory=get_default_llm, description="The LLM that the agent uses"
)
@field_validator("tools", mode="before")
def validate_tools(
cls, v: Optional[Sequence[Union[BaseTool, Callable]]]
) -> Optional[Sequence[BaseTool]]:
"""Validate tools.
If tools are not of type BaseTool, they will be converted to FunctionTools.
This assumes the inputs are tools or callable functions.
"""
if v is None:
return None
validated_tools: List[BaseTool] = []
for tool in v:
if not isinstance(tool, BaseTool):
validated_tools.append(FunctionTool.from_defaults(tool))
else:
validated_tools.append(tool)
for tool in validated_tools:
if tool.metadata.name == "handoff":
raise ValueError(
"'handoff' is a reserved tool name. Please use a different name."
)
return validated_tools # type: ignore[return-value]
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
@abstractmethod
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the agent."""
@abstractmethod
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results."""
@abstractmethod
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the agent's execution."""
@abstractmethod
def run(
self,
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
ctx: Optional[Context] = None,
stepwise: bool = False,
checkpoint_callback: Optional[CheckpointCallback] = None,
**workflow_kwargs: Any,
) -> WorkflowHandler:
"""Run the agent."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (apply_to, check_prerequisites, concat_list,
deprecated_api_warning, deprecated_function,
get_object_from_string, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, get_installed_path, install_package,
is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .progressbar_rich import track_progress_rich
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path', 'install_package',
'is_abs', 'is_method_overridden', 'has_method', 'digit_version',
'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer', 'check_time',
'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress', 'deprecated_function',
'apply_to', 'track_progress_rich', 'get_object_from_string'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (apply_to, check_prerequisites, concat_list,
deprecated_api_warning, deprecated_function,
get_object_from_string, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, get_installed_path, install_package,
is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path', 'install_package',
'is_abs', 'is_method_overridden', 'has_method', 'digit_version',
'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer', 'check_time',
'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress', 'deprecated_function',
'apply_to', 'get_object_from_string'
]
|
from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any
from docarray.document.data import DocumentData
from docarray.document.mixins import AllMixins
from docarray.base import BaseDCType
from docarray.math.ndarray import detach_tensor_if_present
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import ArrayType, StructValueType, DocumentContentType
class Document(AllMixins, BaseDCType):
"""Document is the basic data type in DocArray.
A Document is a container for any kind of data, be it text, image, audio, video, or a 3D mesh.
You can initialize a Document object with given attributes:
.. code-block:: python
from docarray import Document
import numpy
d1 = Document(text='hello')
d3 = Document(tensor=numpy.array([1, 2, 3]))
d4 = Document(
uri='https://jina.ai',
mime_type='text/plain',
granularity=1,
adjacency=3,
tags={'foo': 'bar'},
)
Documents support a :ref:`nested structure <recursive-nested-document>`, which can also be specified during construction:
.. code-block:: python
d = Document(
id='d0',
chunks=[Document(id='d1', chunks=Document(id='d2'))],
matches=[Document(id='d3')],
)
A Document can embed its contents using the :meth:`embed` method and a provided embedding model:
.. code-block:: python
import torchvision
q = (
Document(uri='/Users/usr/path/to/image.jpg')
.load_uri_to_image_tensor()
.set_image_tensor_normalization()
.set_image_tensor_channel_axis(-1, 0)
)
model = torchvision.models.resnet50(pretrained=True)
q.embed(model)
Multiple Documents can be organized into a :class:`~docarray.array.document.DocumentArray`.
.. seealso::
For further details, see our :ref:`user guide <document>`.
"""
_data_class = DocumentData
_unresolved_fields_dest = 'tags'
_post_init_fields = (
'text',
'blob',
'tensor',
'content',
'uri',
'mime_type',
'chunks',
'matches',
)
@overload
def __init__(self):
"""Create an empty Document."""
...
@overload
def __init__(self, _obj: Optional['Document'] = None, copy: bool = False):
...
@overload
def __init__(self, _obj: Optional[Any] = None):
"""Create a Document from a `docarray.dataclass` instance"""
...
@overload
def __init__(
self,
_obj: Optional[Dict],
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
):
...
@overload
def __init__(self, blob: Optional[bytes] = None, **kwargs):
"""Create a Document with binary content."""
...
@overload
def __init__(self, tensor: Optional['ArrayType'] = None, **kwargs):
"""Create a Document with NdArray-like content."""
...
@overload
def __init__(self, text: Optional[str] = None, **kwargs):
"""Create a Document with string content."""
...
@overload
def __init__(self, uri: Optional[str] = None, **kwargs):
"""Create a Document with content from a URI."""
...
@overload
def __init__(
self,
parent_id: Optional[str] = None,
granularity: Optional[int] = None,
adjacency: Optional[int] = None,
blob: Optional[bytes] = None,
tensor: Optional['ArrayType'] = None,
mime_type: Optional[str] = None,
text: Optional[str] = None,
content: Optional['DocumentContentType'] = None,
weight: Optional[float] = None,
uri: Optional[str] = None,
tags: Optional[Dict[str, 'StructValueType']] = None,
offset: Optional[float] = None,
location: Optional[List[float]] = None,
embedding: Optional['ArrayType'] = None,
modality: Optional[str] = None,
evaluations: Optional[Dict[str, Dict[str, 'StructValueType']]] = None,
scores: Optional[Dict[str, Dict[str, 'StructValueType']]] = None,
chunks: Optional[Sequence['Document']] = None,
matches: Optional[Sequence['Document']] = None,
):
...
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getstate__(self):
state = self.__dict__.copy()
for attribute in ['embedding', 'tensor']:
if hasattr(self, attribute):
setattr(
state['_data'],
attribute,
detach_tensor_if_present(getattr(state['_data'], attribute)),
)
return state
|
from typing import overload, Dict, Optional, List, TYPE_CHECKING, Sequence, Any
from docarray.document.data import DocumentData
from docarray.document.mixins import AllMixins
from docarray.base import BaseDCType
from docarray.math.ndarray import detach_tensor_if_present
if TYPE_CHECKING:
from docarray.typing import ArrayType, StructValueType, DocumentContentType
class Document(AllMixins, BaseDCType):
"""Document is the basic data type in DocArray.
A Document is a container for any kind of data, be it text, image, audio, video, or a 3D mesh.
You can initialize a Document object with given attributes:
.. code-block:: python
from docarray import Document
import numpy
d1 = Document(text='hello')
d3 = Document(tensor=numpy.array([1, 2, 3]))
d4 = Document(
uri='https://jina.ai',
mime_type='text/plain',
granularity=1,
adjacency=3,
tags={'foo': 'bar'},
)
Documents support a :ref:`nested structure <recursive-nested-document>`, which can also be specified during construction:
.. code-block:: python
d = Document(
id='d0',
chunks=[Document(id='d1', chunks=Document(id='d2'))],
matches=[Document(id='d3')],
)
A Document can embed its contents using the :meth:`embed` method and a provided embedding model:
.. code-block:: python
import torchvision
q = (
Document(uri='/Users/usr/path/to/image.jpg')
.load_uri_to_image_tensor()
.set_image_tensor_normalization()
.set_image_tensor_channel_axis(-1, 0)
)
model = torchvision.models.resnet50(pretrained=True)
q.embed(model)
Multiple Documents can be organized into a :class:`~docarray.array.document.DocumentArray`.
.. seealso::
For further details, see our :ref:`user guide <document>`.
"""
_data_class = DocumentData
_unresolved_fields_dest = 'tags'
_post_init_fields = (
'text',
'blob',
'tensor',
'content',
'uri',
'mime_type',
'chunks',
'matches',
)
@overload
def __init__(self):
"""Create an empty Document."""
...
@overload
def __init__(self, _obj: Optional['Document'] = None, copy: bool = False):
...
@overload
def __init__(self, _obj: Optional[Any] = None):
"""Create a Document from a `docarray.dataclass` instance"""
...
@overload
def __init__(
self,
_obj: Optional[Dict],
copy: bool = False,
field_resolver: Optional[Dict[str, str]] = None,
unknown_fields_handler: str = 'catch',
):
...
@overload
def __init__(self, blob: Optional[bytes] = None, **kwargs):
"""Create a Document with binary content."""
...
@overload
def __init__(self, tensor: Optional['ArrayType'] = None, **kwargs):
"""Create a Document with NdArray-like content."""
...
@overload
def __init__(self, text: Optional[str] = None, **kwargs):
"""Create a Document with string content."""
...
@overload
def __init__(self, uri: Optional[str] = None, **kwargs):
"""Create a Document with content from a URI."""
...
@overload
def __init__(
self,
parent_id: Optional[str] = None,
granularity: Optional[int] = None,
adjacency: Optional[int] = None,
blob: Optional[bytes] = None,
tensor: Optional['ArrayType'] = None,
mime_type: Optional[str] = None,
text: Optional[str] = None,
content: Optional['DocumentContentType'] = None,
weight: Optional[float] = None,
uri: Optional[str] = None,
tags: Optional[Dict[str, 'StructValueType']] = None,
offset: Optional[float] = None,
location: Optional[List[float]] = None,
embedding: Optional['ArrayType'] = None,
modality: Optional[str] = None,
evaluations: Optional[Dict[str, Dict[str, 'StructValueType']]] = None,
scores: Optional[Dict[str, Dict[str, 'StructValueType']]] = None,
chunks: Optional[Sequence['Document']] = None,
matches: Optional[Sequence['Document']] = None,
):
...
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getstate__(self):
state = self.__dict__.copy()
for attribute in ['embedding', 'tensor']:
if hasattr(self, attribute):
setattr(
state['_data'],
attribute,
detach_tensor_if_present(getattr(state['_data'], attribute)),
)
return state
|
import math
import os
import pytest
import torch
from torchvision.io import _HAS_GPU_VIDEO_DECODER, VideoReader
try:
import av
except ImportError:
av = None
VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos")
@pytest.mark.skipif(_HAS_GPU_VIDEO_DECODER is False, reason="Didn't compile with support for gpu decoder")
class TestVideoGPUDecoder:
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize(
"video_file",
[
"RATRACE_wave_f_nm_np1_fr_goo_37.avi",
"TrumanShow_wave_f_nm_np1_fr_med_26.avi",
"v_SoccerJuggling_g23_c01.avi",
"v_SoccerJuggling_g24_c01.avi",
"R6llTwEh07w.mp4",
"SOX5yA1l24A.mp4",
"WUzgd7C1pWA.mp4",
],
)
def test_frame_reading(self, video_file):
full_path = os.path.join(VIDEO_DIR, video_file)
decoder = VideoReader(full_path, device="cuda")
with av.open(full_path) as container:
for av_frame in container.decode(container.streams.video[0]):
av_frames = torch.tensor(av_frame.to_rgb(src_colorspace="ITU709").to_ndarray())
vision_frames = next(decoder)["data"]
mean_delta = torch.mean(torch.abs(av_frames.float() - vision_frames.cpu().float()))
assert mean_delta < 0.75
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize("keyframes", [True, False])
@pytest.mark.parametrize(
"full_path, duration",
[
(os.path.join(VIDEO_DIR, x), y)
for x, y in [
("v_SoccerJuggling_g23_c01.avi", 8.0),
("v_SoccerJuggling_g24_c01.avi", 8.0),
("R6llTwEh07w.mp4", 10.0),
("SOX5yA1l24A.mp4", 11.0),
("WUzgd7C1pWA.mp4", 11.0),
]
],
)
def test_seek_reading(self, keyframes, full_path, duration):
decoder = VideoReader(full_path, device="cuda")
time = duration / 2
decoder.seek(time, keyframes_only=keyframes)
with av.open(full_path) as container:
container.seek(int(time * 1000000), any_frame=not keyframes, backward=False)
for av_frame in container.decode(container.streams.video[0]):
av_frames = torch.tensor(av_frame.to_rgb(src_colorspace="ITU709").to_ndarray())
vision_frames = next(decoder)["data"]
mean_delta = torch.mean(torch.abs(av_frames.float() - vision_frames.cpu().float()))
assert mean_delta < 0.75
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize(
"video_file",
[
"RATRACE_wave_f_nm_np1_fr_goo_37.avi",
"TrumanShow_wave_f_nm_np1_fr_med_26.avi",
"v_SoccerJuggling_g23_c01.avi",
"v_SoccerJuggling_g24_c01.avi",
"R6llTwEh07w.mp4",
"SOX5yA1l24A.mp4",
"WUzgd7C1pWA.mp4",
],
)
def test_metadata(self, video_file):
full_path = os.path.join(VIDEO_DIR, video_file)
decoder = VideoReader(full_path, device="cuda")
video_metadata = decoder.get_metadata()["video"]
with av.open(full_path) as container:
video = container.streams.video[0]
av_duration = float(video.duration * video.time_base)
assert math.isclose(video_metadata["duration"], av_duration, rel_tol=1e-2)
assert math.isclose(video_metadata["fps"], video.base_rate, rel_tol=1e-2)
if __name__ == "__main__":
pytest.main([__file__])
|
import math
import os
import pytest
import torch
import torchvision
from torchvision import _HAS_GPU_VIDEO_DECODER
from torchvision.io import VideoReader
try:
import av
except ImportError:
av = None
VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos")
@pytest.mark.skipif(_HAS_GPU_VIDEO_DECODER is False, reason="Didn't compile with support for gpu decoder")
class TestVideoGPUDecoder:
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize(
"video_file",
[
"RATRACE_wave_f_nm_np1_fr_goo_37.avi",
"TrumanShow_wave_f_nm_np1_fr_med_26.avi",
"v_SoccerJuggling_g23_c01.avi",
"v_SoccerJuggling_g24_c01.avi",
"R6llTwEh07w.mp4",
"SOX5yA1l24A.mp4",
"WUzgd7C1pWA.mp4",
],
)
def test_frame_reading(self, video_file):
torchvision.set_video_backend("cuda")
full_path = os.path.join(VIDEO_DIR, video_file)
decoder = VideoReader(full_path)
with av.open(full_path) as container:
for av_frame in container.decode(container.streams.video[0]):
av_frames = torch.tensor(av_frame.to_rgb(src_colorspace="ITU709").to_ndarray())
vision_frames = next(decoder)["data"]
mean_delta = torch.mean(torch.abs(av_frames.float() - vision_frames.cpu().float()))
assert mean_delta < 0.75
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize("keyframes", [True, False])
@pytest.mark.parametrize(
"full_path, duration",
[
(os.path.join(VIDEO_DIR, x), y)
for x, y in [
("v_SoccerJuggling_g23_c01.avi", 8.0),
("v_SoccerJuggling_g24_c01.avi", 8.0),
("R6llTwEh07w.mp4", 10.0),
("SOX5yA1l24A.mp4", 11.0),
("WUzgd7C1pWA.mp4", 11.0),
]
],
)
def test_seek_reading(self, keyframes, full_path, duration):
torchvision.set_video_backend("cuda")
decoder = VideoReader(full_path)
time = duration / 2
decoder.seek(time, keyframes_only=keyframes)
with av.open(full_path) as container:
container.seek(int(time * 1000000), any_frame=not keyframes, backward=False)
for av_frame in container.decode(container.streams.video[0]):
av_frames = torch.tensor(av_frame.to_rgb(src_colorspace="ITU709").to_ndarray())
vision_frames = next(decoder)["data"]
mean_delta = torch.mean(torch.abs(av_frames.float() - vision_frames.cpu().float()))
assert mean_delta < 0.75
@pytest.mark.skipif(av is None, reason="PyAV unavailable")
@pytest.mark.parametrize(
"video_file",
[
"RATRACE_wave_f_nm_np1_fr_goo_37.avi",
"TrumanShow_wave_f_nm_np1_fr_med_26.avi",
"v_SoccerJuggling_g23_c01.avi",
"v_SoccerJuggling_g24_c01.avi",
"R6llTwEh07w.mp4",
"SOX5yA1l24A.mp4",
"WUzgd7C1pWA.mp4",
],
)
def test_metadata(self, video_file):
torchvision.set_video_backend("cuda")
full_path = os.path.join(VIDEO_DIR, video_file)
decoder = VideoReader(full_path)
video_metadata = decoder.get_metadata()["video"]
with av.open(full_path) as container:
video = container.streams.video[0]
av_duration = float(video.duration * video.time_base)
assert math.isclose(video_metadata["duration"], av_duration, rel_tol=1e-2)
assert math.isclose(video_metadata["fps"], video.base_rate, rel_tol=1e-2)
if __name__ == "__main__":
pytest.main([__file__])
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from terminaltables import AsciiTable
from mmdet.models import dense_heads
from mmdet.models.dense_heads import * # noqa: F401,F403
def test_dense_heads_test_attr():
"""Tests inference methods such as simple_test and aug_test."""
# make list of dense heads
exceptions = ['FeatureAdaption'] # module used in head
all_dense_heads = [m for m in dense_heads.__all__ if m not in exceptions]
# search attributes
check_attributes = [
'simple_test', 'aug_test', 'simple_test_bboxes', 'simple_test_rpn',
'aug_test_rpn'
]
table_header = ['head name'] + check_attributes
table_data = [table_header]
not_found = {k: [] for k in check_attributes}
for target_head_name in all_dense_heads:
target_head = globals()[target_head_name]
target_head_attributes = dir(target_head)
check_results = [target_head_name]
for check_attribute in check_attributes:
found = check_attribute in target_head_attributes
check_results.append(found)
if not found:
not_found[check_attribute].append(target_head_name)
table_data.append(check_results)
table = AsciiTable(table_data)
print()
print(table.table)
# NOTE: this test just checks attributes.
# simple_test of RPN heads will not work now.
assert len(not_found['simple_test']) == 0, \
f'simple_test not found in {not_found["simple_test"]}'
if len(not_found['aug_test']) != 0:
warnings.warn(f'aug_test not found in {not_found["aug_test"]}. '
'Please implement it or raise NotImplementedError.')
|
import warnings
from terminaltables import AsciiTable
from mmdet.models import dense_heads
from mmdet.models.dense_heads import * # noqa: F401,F403
def test_dense_heads_test_attr():
"""Tests inference methods such as simple_test and aug_test."""
# make list of dense heads
exceptions = ['FeatureAdaption'] # module used in head
all_dense_heads = [m for m in dense_heads.__all__ if m not in exceptions]
# search attributes
check_attributes = [
'simple_test', 'aug_test', 'simple_test_bboxes', 'simple_test_rpn',
'aug_test_rpn'
]
table_header = ['head name'] + check_attributes
table_data = [table_header]
not_found = {k: [] for k in check_attributes}
for target_head_name in all_dense_heads:
target_head = globals()[target_head_name]
target_head_attributes = dir(target_head)
check_results = [target_head_name]
for check_attribute in check_attributes:
found = check_attribute in target_head_attributes
check_results.append(found)
if not found:
not_found[check_attribute].append(target_head_name)
table_data.append(check_results)
table = AsciiTable(table_data)
print()
print(table.table)
# NOTE: this test just checks attributes.
# simple_test of RPN heads will not work now.
assert len(not_found['simple_test']) == 0, \
f'simple_test not found in {not_found["simple_test"]}'
if len(not_found['aug_test']) != 0:
warnings.warn(f'aug_test not found in {not_found["aug_test"]}. '
'Please implement it or raise NotImplementedError.')
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(
type='OneOf',
transforms=[
dict(
type='RGBShift',
r_shift_limit=10,
g_shift_limit=10,
b_shift_limit=10,
p=1.0),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=1.0)
],
p=0.1),
dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_bboxes_labels', 'gt_ignore_flags'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
},
skip_img_without_anno=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(
type='OneOf',
transforms=[
dict(
type='RGBShift',
r_shift_limit=10,
g_shift_limit=10,
b_shift_limit=10,
p=1.0),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=1.0)
],
p=0.1),
dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='Pad', size_divisor=32),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'img_norm_cfg',
'pad_shape', 'scale_factor'))
]
data = dict(train=dict(pipeline=train_pipeline))
|
from __future__ import annotations
from enum import Enum
from typing import Any, Optional, Tuple, Union
import torch
from ._datapoint import Datapoint
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for bounding boxes.
.. note::
There should be only one :class:`~torchvision.datapoints.BoundingBoxes`
instance per sample e.g. ``{"img": img, "bbox": BoundingBoxes(...)}``,
although one :class:`~torchvision.datapoints.BoundingBoxes` object can
contain multiple bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: Union[BoundingBoxFormat, str], canvas_size: Tuple[int, int]) -> BoundingBoxes: # type: ignore[override]
if tensor.ndim == 1:
tensor = tensor.unsqueeze(0)
elif tensor.ndim != 2:
raise ValueError(f"Expected a 1D or 2D tensor, got {tensor.ndim}D")
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def wrap_like(
cls,
other: BoundingBoxes,
tensor: torch.Tensor,
*,
format: Optional[Union[BoundingBoxFormat, str]] = None,
canvas_size: Optional[Tuple[int, int]] = None,
) -> BoundingBoxes:
"""Wrap a :class:`torch.Tensor` as :class:`BoundingBoxes` from a reference.
Args:
other (BoundingBoxes): Reference bounding box.
tensor (Tensor): Tensor to be wrapped as :class:`BoundingBoxes`
format (BoundingBoxFormat, str, optional): Format of the bounding box. If omitted, it is taken from the
reference.
canvas_size (two-tuple of ints, optional): Height and width of the corresponding image or video. If
omitted, it is taken from the reference.
"""
return cls._wrap(
tensor,
format=format if format is not None else other.format,
canvas_size=canvas_size if canvas_size is not None else other.canvas_size,
)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
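# Illustrative usage sketch, not part of the original module; it assumes a
# torchvision build (~0.16) that exposes this class as
# ``torchvision.datapoints.BoundingBoxes``.
def _example_bounding_boxes_usage() -> None:
    import torch
    from torchvision import datapoints

    boxes = datapoints.BoundingBoxes(
        [[0, 0, 10, 10], [5, 5, 20, 20]],
        format="XYXY",          # strings are normalized to BoundingBoxFormat in _wrap
        canvas_size=(32, 32),   # (height, width) of the corresponding image
    )
    # wrap_like re-attaches the metadata after an op that returned a plain tensor
    shifted = datapoints.BoundingBoxes.wrap_like(
        boxes, boxes.as_subclass(torch.Tensor) + 1
    )
    assert shifted.format is boxes.format
    assert shifted.canvas_size == boxes.canvas_size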
|
from __future__ import annotations
from enum import Enum
from typing import Any, Optional, Tuple, Union
import torch
from ._datapoint import Datapoint
class BoundingBoxFormat(Enum):
"""[BETA] Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: Union[BoundingBoxFormat, str], canvas_size: Tuple[int, int]) -> BoundingBoxes: # type: ignore[override]
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def wrap_like(
cls,
other: BoundingBoxes,
tensor: torch.Tensor,
*,
format: Optional[Union[BoundingBoxFormat, str]] = None,
canvas_size: Optional[Tuple[int, int]] = None,
) -> BoundingBoxes:
"""Wrap a :class:`torch.Tensor` as :class:`BoundingBoxes` from a reference.
Args:
other (BoundingBoxes): Reference bounding box.
tensor (Tensor): Tensor to be wrapped as :class:`BoundingBoxes`
format (BoundingBoxFormat, str, optional): Format of the bounding box. If omitted, it is taken from the
reference.
canvas_size (two-tuple of ints, optional): Height and width of the corresponding image or video. If
omitted, it is taken from the reference.
"""
return cls._wrap(
tensor,
format=format if format is not None else other.format,
canvas_size=canvas_size if canvas_size is not None else other.canvas_size,
)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inherited from ``BaseLoop`` should overwrite the
:meth:`run` method.
Args:
runner (Runner): A reference of runner.
dataloader (Dataloader or dict): An iterator that yields one batch of the
dataset per iteration.
"""
def __init__(self, runner, dataloader: Union[DataLoader, Dict]) -> None:
self._runner = runner
if isinstance(dataloader, dict):
self.dataloader = runner.build_dataloader(dataloader)
else:
self.dataloader = dataloader
@property
def runner(self):
return self._runner
@abstractmethod
def run(self) -> None:
"""Execute loop."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inherited from ``BaseLoop`` should overwrite the
:meth:`run` method.
Args:
runner (Runner): A reference of runner.
dataloader (Dataloader or dict): An iterator that yields one batch of the
dataset per iteration.
"""
def __init__(self, runner, dataloader: Union[DataLoader, Dict]) -> None:
self._runner = runner
if isinstance(dataloader, dict):
self.dataloader = runner.build_dataloader(dataloader)
else:
self.dataloader = dataloader
# TODO, used by `end_of_epoch` of `Hook`
self._runner.data_loader = self.dataloader
@property
def runner(self):
return self._runner
@abstractmethod
def run(self) -> None:
"""Execute loop."""
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""[BETA] Convert all tv_tensors to pure tensors, removing associated metadata (if any).
.. v2betastatus:: ToPureTensor transform
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
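# Illustrative round-trip sketch, not part of the original module; it only uses
# the transforms defined above plus Pillow.
def _example_conversion_round_trip() -> None:
    pil_img = PIL.Image.new("RGB", (4, 4))
    as_uint8 = PILToTensor()(pil_img)      # plain torch.Tensor, shape (3, 4, 4), values unchanged
    as_image = ToImage()(as_uint8)         # tv_tensors.Image, still not rescaled
    as_plain = ToPureTensor()(as_image)    # back to a plain torch.Tensor
    back_to_pil = ToPILImage()(as_plain)   # PIL.Image again
    assert back_to_pil.size == pil_img.size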
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""[BETA] Convert all datapoints to pure tensors, removing associated metadata (if any).
.. v2betastatus:: ToPureTensor transform
This doesn't scale or change the values, only the type.
"""
_transformed_types = (datapoints.Datapoint,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from langchain_core.callbacks import Callbacks
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
ahandle_event,
atrace_as_chain_group,
handle_event,
trace_as_chain_group,
)
from langchain_core.tracers.context import (
collect_runs,
tracing_enabled,
tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"get_openai_callback": "langchain_community.callbacks.manager",
"wandb_tracing_enabled": "langchain_community.callbacks.manager",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"AsyncCallbackManagerForChainRun",
"AsyncCallbackManagerForLLMRun",
"AsyncCallbackManagerForRetrieverRun",
"AsyncCallbackManagerForToolRun",
"AsyncParentRunManager",
"AsyncRunManager",
"BaseRunManager",
"CallbackManager",
"CallbackManagerForChainGroup",
"CallbackManagerForChainRun",
"CallbackManagerForLLMRun",
"CallbackManagerForRetrieverRun",
"CallbackManagerForToolRun",
"Callbacks",
"ParentRunManager",
"RunManager",
"ahandle_event",
"atrace_as_chain_group",
"collect_runs",
"env_var_is_set",
"get_openai_callback",
"handle_event",
"trace_as_chain_group",
"tracing_enabled",
"tracing_v2_enabled",
"wandb_tracing_enabled",
]
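# Note (illustrative, not part of the original module): thanks to the
# module-level ``__getattr__`` above, the deprecated names keep working, e.g.
#
#   from langchain.callbacks.manager import get_openai_callback  # resolved via create_importer
#   with get_openai_callback() as cb:  # assumed usage; requires langchain-community
#       ...
#
# while the importer points users at ``langchain_community.callbacks.manager``.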
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from langchain_core.callbacks import Callbacks
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
ahandle_event,
atrace_as_chain_group,
handle_event,
trace_as_chain_group,
)
from langchain_core.tracers.context import (
collect_runs,
tracing_enabled,
tracing_v2_enabled,
)
from langchain_core.utils.env import env_var_is_set
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.manager import (
get_openai_callback,
wandb_tracing_enabled,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"get_openai_callback": "langchain_community.callbacks.manager",
"wandb_tracing_enabled": "langchain_community.callbacks.manager",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ahandle_event",
"AsyncCallbackManagerForChainGroup",
"AsyncCallbackManagerForChainRun",
"AsyncCallbackManagerForLLMRun",
"AsyncCallbackManagerForRetrieverRun",
"AsyncCallbackManagerForToolRun",
"AsyncParentRunManager",
"AsyncRunManager",
"atrace_as_chain_group",
"BaseRunManager",
"CallbackManager",
"CallbackManagerForChainGroup",
"CallbackManagerForChainRun",
"CallbackManagerForLLMRun",
"CallbackManagerForRetrieverRun",
"CallbackManagerForToolRun",
"Callbacks",
"AsyncCallbackManager",
"collect_runs",
"env_var_is_set",
"get_openai_callback",
"handle_event",
"ParentRunManager",
"RunManager",
"trace_as_chain_group",
"tracing_enabled",
"tracing_v2_enabled",
"wandb_tracing_enabled",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
from collections import OrderedDict
import torch
from mmengine.runner import CheckpointLoader
convert_dict_fpn = {
'module.backbone.fpn.fpn_inner2': 'neck.lateral_convs.0.conv',
'module.backbone.fpn.fpn_inner3': 'neck.lateral_convs.1.conv',
'module.backbone.fpn.fpn_inner4': 'neck.lateral_convs.2.conv',
'module.backbone.fpn.fpn_layer2': 'neck.fpn_convs.0.conv',
'module.backbone.fpn.fpn_layer3': 'neck.fpn_convs.1.conv',
'module.backbone.fpn.fpn_layer4': 'neck.fpn_convs.2.conv',
'module.backbone.fpn.top_blocks.p6': 'neck.fpn_convs.3.conv',
'module.backbone.fpn.top_blocks.p7': 'neck.fpn_convs.4.conv',
}
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
def convert(ckpt):
new_ckpt = OrderedDict()
for k, v in list(ckpt.items()):
if 'anchor_generator' in k or 'resizer' in k or 'cls_logits' in k:
continue
new_v = v
if 'module.backbone.body' in k:
new_k = k.replace('module.backbone.body', 'backbone')
if 'patch_embed.proj' in new_k:
new_k = new_k.replace('patch_embed.proj',
'patch_embed.projection')
elif 'pos_drop' in new_k:
new_k = new_k.replace('pos_drop', 'drop_after_pos')
if 'layers' in new_k:
new_k = new_k.replace('layers', 'stages')
if 'mlp.fc1' in new_k:
new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in new_k:
new_k = new_k.replace('mlp.fc2', 'ffn.layers.1')
elif 'attn' in new_k:
new_k = new_k.replace('attn', 'attn.w_msa')
if 'downsample' in k:
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
elif 'module.backbone.fpn' in k:
old_k = k.replace('.weight', '')
old_k = old_k.replace('.bias', '')
new_k = k.replace(old_k, convert_dict_fpn[old_k])
elif 'module.language_backbone' in k:
new_k = k.replace('module.language_backbone',
'language_model.language_backbone')
if 'pooler' in k:
continue
elif 'module.rpn' in k:
if 'module.rpn.head.scales' in k:
new_k = k.replace('module.rpn.head.scales',
'bbox_head.head.scales')
else:
new_k = k.replace('module.rpn', 'bbox_head')
if 'anchor_generator' in k and 'resizer' in k:
continue
else:
print('skip:', k)
continue
if 'DyConv' in new_k:
new_k = new_k.replace('DyConv', 'dyconvs')
if 'AttnConv' in new_k:
new_k = new_k.replace('AttnConv', 'attnconv')
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys to mmdet style.')
parser.add_argument(
'src', default='glip_a_tiny_o365.pth', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument(
'--dst', default='glip_tiny_a_mmdet.pth', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert(state_dict)
torch.save(weight, args.dst)
sha = subprocess.check_output(['sha256sum', args.dst]).decode()
final_file = args.dst.replace('.pth', '') + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', args.dst, final_file])
print(f'Done! Saved to {final_file}')
if __name__ == '__main__':
main()
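# Sanity-check sketch (an assumption, not part of the original script): the two
# helpers above appear to reorder Swin patch-merging weights from a
# [0, 1, 2, 3] channel grouping to the [0, 2, 1, 3] layout expected by
# MMDetection. On a tiny tensor:
#
#   w = torch.arange(8).reshape(2, 4).float()   # out_channel=2, in_channel=4
#   correct_unfold_reduction_order(w)
#   # tensor([[0., 2., 1., 3.],
#   #         [4., 6., 5., 7.]])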
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
from collections import OrderedDict
import torch
from mmengine.runner import CheckpointLoader
convert_dict_fpn = {
'module.backbone.fpn.fpn_inner2': 'neck.lateral_convs.0.conv',
'module.backbone.fpn.fpn_inner3': 'neck.lateral_convs.1.conv',
'module.backbone.fpn.fpn_inner4': 'neck.lateral_convs.2.conv',
'module.backbone.fpn.fpn_layer2': 'neck.fpn_convs.0.conv',
'module.backbone.fpn.fpn_layer3': 'neck.fpn_convs.1.conv',
'module.backbone.fpn.fpn_layer4': 'neck.fpn_convs.2.conv',
'module.backbone.fpn.top_blocks.p6': 'neck.fpn_convs.3.conv',
'module.backbone.fpn.top_blocks.p7': 'neck.fpn_convs.4.conv',
}
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
def convert(ckpt):
new_ckpt = OrderedDict()
for k, v in list(ckpt.items()):
if 'anchor_generator' in k or 'resizer' in k or 'cls_logits' in k:
continue
new_v = v
if 'module.backbone.body' in k:
new_k = k.replace('module.backbone.body', 'backbone')
if 'patch_embed.proj' in new_k:
new_k = new_k.replace('patch_embed.proj',
'patch_embed.projection')
elif 'pos_drop' in new_k:
new_k = new_k.replace('pos_drop', 'drop_after_pos')
if 'layers' in new_k:
new_k = new_k.replace('layers', 'stages')
if 'mlp.fc1' in new_k:
new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in new_k:
new_k = new_k.replace('mlp.fc2', 'ffn.layers.1')
elif 'attn' in new_k:
new_k = new_k.replace('attn', 'attn.w_msa')
if 'downsample' in k:
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
elif 'module.backbone.fpn' in k:
old_k = k.replace('.weight', '')
old_k = old_k.replace('.bias', '')
new_k = k.replace(old_k, convert_dict_fpn[old_k])
elif 'module.language_backbone' in k:
new_k = k.replace('module.language_backbone',
'language_model.language_backbone')
if 'pooler' in k:
continue
elif 'module.rpn' in k:
if 'module.rpn.head.scales' in k:
new_k = k.replace('module.rpn.head.scales',
'bbox_head.head.scales')
else:
new_k = k.replace('module.rpn', 'bbox_head')
if 'anchor_generator' in k and 'resizer' in k:
continue
else:
print('skip:', k)
continue
if 'DyConv' in new_k:
new_k = new_k.replace('DyConv', 'dyconvs')
if 'AttnConv' in new_k:
new_k = new_k.replace('AttnConv', 'attnconv')
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys in pretrained eva '
'models to mmpretrain style.')
parser.add_argument(
'src', default='glip_a_tiny_o365.pth', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument(
'--dst', default='glip_tiny_a_mmdet.pth', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert(state_dict)
torch.save(weight, args.dst)
sha = subprocess.check_output(['sha256sum', args.dst]).decode()
final_file = args.dst.replace('.pth', '') + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', args.dst, final_file])
print(f'Done! Saved to {final_file}')
if __name__ == '__main__':
main()
|
"""Extra array functions built on top of the array API standard."""
from ._delegation import isclose, pad
from ._lib._at import at
from ._lib._funcs import (
apply_where,
atleast_nd,
broadcast_shapes,
cov,
create_diagonal,
expand_dims,
kron,
nunique,
setdiff1d,
sinc,
)
from ._lib._lazy import lazy_apply
__version__ = "0.7.1"
# pylint: disable=duplicate-code
__all__ = [
"__version__",
"apply_where",
"at",
"atleast_nd",
"broadcast_shapes",
"cov",
"create_diagonal",
"expand_dims",
"isclose",
"kron",
"lazy_apply",
"nunique",
"pad",
"setdiff1d",
"sinc",
]
|
"""Extra array functions built on top of the array API standard."""
from ._delegation import isclose, pad
from ._lib._at import at
from ._lib._funcs import (
apply_where,
atleast_nd,
broadcast_shapes,
cov,
create_diagonal,
expand_dims,
kron,
nunique,
setdiff1d,
sinc,
)
from ._lib._lazy import lazy_apply
__version__ = "0.7.0"
# pylint: disable=duplicate-code
__all__ = [
"__version__",
"apply_where",
"at",
"atleast_nd",
"broadcast_shapes",
"cov",
"create_diagonal",
"expand_dims",
"isclose",
"kron",
"lazy_apply",
"nunique",
"pad",
"setdiff1d",
"sinc",
]
|
import types
from typing import TYPE_CHECKING
from docarray.store.file import FileDocStore
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.store.s3 import S3DocStore # noqa: F401
__all__ = ['FileDocStore']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'S3DocStore':
import_library('smart_open', raise_error=True)
import_library('botocore', raise_error=True)
import_library('boto3', raise_error=True)
import docarray.store.s3 as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
store_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return store_cls
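# Note (illustrative, not part of the original module): because of the lazy
# ``__getattr__`` above, ``from docarray.store import FileDocStore`` needs no
# optional dependencies, while ``from docarray.store import S3DocStore`` pulls
# in smart_open/botocore/boto3 only at first access.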
|
import types
from typing import TYPE_CHECKING
from docarray.store.file import FileDocStore
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.store.jac import JACDocStore # noqa: F401
from docarray.store.s3 import S3DocStore # noqa: F401
__all__ = ['FileDocStore']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'JACDocStore':
import_library('hubble', raise_error=True)
import docarray.store.jac as lib
elif name == 'S3DocStore':
import_library('smart_open', raise_error=True)
import_library('botocore', raise_error=True)
import_library('boto3', raise_error=True)
import docarray.store.s3 as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
store_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return store_cls
|
_base_ = './queryinst_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 36
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = './queryinst_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 36
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# TODO: support auto scaling lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
# auto_scale_lr = dict(base_batch_size=16)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# TODO: support auto scaling lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
# auto_scale_lr = dict(base_batch_size=16)
|
"""
In SecGPT, if the hub planner determines that a user query can be addressed solely by an LLM, it utilizes a non-collaborative vanilla spoke, which operates without awareness of other system functionalities.
"""
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import Settings
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.agent import ReActAgent
from typing import List, Optional
from llama_index.core.base.llms.types import ChatMessage
class VanillaSpoke:
"""
A non-collaborative vanilla spoke that operates without awareness of other system functionalities.
It is used when a user query can be addressed solely by an LLM without requiring collaboration.
"""
def __init__(
self, llm: LLM = None, memory: ChatMemoryBuffer = None, verbose: bool = False
) -> None:
"""
Initialize the VanillaSpoke with an LLM and optional memory.
Args:
llm (LLM, optional): A large language model for the spoke. Defaults to None.
memory (ChatMemoryBuffer, optional): The chat memory buffer. Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
"""
self.llm = llm or Settings.llm
self.memory = memory or ChatMemoryBuffer.from_defaults(
chat_history=[], llm=self.llm
)
self.vanilla_agent = ReActAgent.from_tools(
tools=None, llm=self.llm, memory=self.memory, verbose=verbose
)
def chat(
self,
query: str,
chat_history: Optional[List[ChatMessage]] = None,
):
"""
Perform a chat interaction with the vanilla agent.
Args:
query (str): The query to be processed.
chat_history (Optional[List[ChatMessage]], optional): The chat history. Defaults to None.
Returns:
str: The response from the vanilla agent.
"""
return self.vanilla_agent.chat(query, chat_history=chat_history)
|
"""
In SecGPT, if the hub planner determines that a user query can be addressed solely by an LLM, it utilizes a non-collaborative vanilla spoke, which operates without awareness of other system functionalities.
"""
from llama_index.core.llms.llm import LLM
from llama_index.core.settings import Settings
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.agent import ReActAgent
from typing import List, Optional
from llama_index.core.base.llms.types import ChatMessage
class VanillaSpoke:
"""
A non-collaborative vanilla spoke that operates without awareness of other system functionalities.
It is used when a user query can be addressed solely by an LLM without requiring collaboration.
"""
def __init__(
self, llm: LLM = None, memory: ChatMemoryBuffer = None, verbose: bool = False
) -> None:
"""
Initialize the VanillaSpoke with an LLM and optional memory.
Args:
llm (LLM, optional): A large language model for the spoke. Defaults to None.
memory (ChatMemoryBuffer, optional): The chat memory buffer. Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
"""
self.llm = llm or Settings.llm
self.memory = memory or ChatMemoryBuffer.from_defaults(
chat_history=[], llm=self.llm
)
self.vanilla_agent = ReActAgent.from_tools(
tools=None, llm=self.llm, memory=self.memory, verbose=verbose
)
def chat(
self,
query: str,
chat_history: Optional[List[ChatMessage]] = None,
):
"""
Perform a chat interaction with the vanilla agent.
Args:
query (str): The query to be processed.
chat_history (Optional[List[ChatMessage]], optional): The chat history. Defaults to None.
Returns:
str: The response from the vanilla agent.
"""
return self.vanilla_agent.chat(query, chat_history=chat_history)
|
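A minimal usage sketch for the VanillaSpoke class defined above, assuming llama-index and its OpenAI integration are installed; the model name and query are illustrative only.
# Hedged usage sketch (not part of the original file).
from llama_index.core.settings import Settings
from llama_index.llms.openai import OpenAI  # assumes llama-index-llms-openai is installed

Settings.llm = OpenAI(model="gpt-4o-mini")  # hypothetical model choice

spoke = VanillaSpoke(verbose=True)  # falls back to Settings.llm and a fresh memory buffer
print(spoke.chat("Explain what a hub-and-spoke architecture is in one sentence."))
|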
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Rescaling")
class Rescaling(TFDataLayer):
"""A preprocessing layer which rescales input values to a new range.
This layer rescales every value of an input (often an image) by multiplying
by `scale` and adding `offset`.
For instance:
1. To rescale an input in the `[0, 255]` range
to be in the `[0, 1]` range, you would pass `scale=1./255`.
2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,
you would pass `scale=1./127.5, offset=-1`.
The rescaling is applied both during training and inference. Inputs can be
of integer or floating point dtype, and by default the layer will output
floats.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(self, scale, offset=0.0, **kwargs):
super().__init__(**kwargs)
self.scale = scale
self.offset = offset
self.supports_masking = True
def call(self, inputs):
dtype = self.compute_dtype
scale = self.backend.cast(self.scale, dtype)
offset = self.backend.cast(self.offset, dtype)
scale_shape = self.backend.core.shape(scale)
if (
len(scale_shape) > 0
and backend.image_data_format() == "channels_first"
):
scale = self.backend.numpy.reshape(
scale, scale_shape + (1,) * (3 - len(scale_shape))
)
return self.backend.cast(inputs, dtype) * scale + offset
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update(
{
# `scale` and `offset` might be numpy array.
"scale": serialization_lib.serialize_keras_object(self.scale),
"offset": serialization_lib.serialize_keras_object(self.offset),
}
)
return config
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
config["scale"] = serialization_lib.deserialize_keras_object(
config["scale"], custom_objects=custom_objects
)
config["offset"] = serialization_lib.deserialize_keras_object(
config["offset"], custom_objects=custom_objects
)
return cls(**config)
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.tf_data_layer import TFDataLayer
@keras_export("keras.layers.Rescaling")
class Rescaling(TFDataLayer):
"""A preprocessing layer which rescales input values to a new range.
This layer rescales every value of an input (often an image) by multiplying
by `scale` and adding `offset`.
For instance:
1. To rescale an input in the `[0, 255]` range
to be in the `[0, 1]` range, you would pass `scale=1./255`.
2. To rescale an input in the `[0, 255]` range to be in the `[-1, 1]` range,
you would pass `scale=1./127.5, offset=-1`.
The rescaling is applied both during training and inference. Inputs can be
of integer or floating point dtype, and by default the layer will output
floats.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
"""
def __init__(self, scale, offset=0.0, **kwargs):
super().__init__(**kwargs)
self.scale = scale
self.offset = offset
self.supports_masking = True
def call(self, inputs):
dtype = self.compute_dtype
scale = self.backend.cast(self.scale, dtype)
offset = self.backend.cast(self.offset, dtype)
scale_shape = self.backend.core.shape(scale)
if (
len(scale_shape) > 0
and backend.image_data_format() == "channels_first"
):
scale = self.backend.numpy.reshape(
scale, scale_shape + (1,) * (3 - len(scale_shape))
)
return self.backend.cast(inputs, dtype) * scale + offset
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"scale": self.scale,
"offset": self.offset,
}
return {**base_config, **config}
|
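A short usage sketch of the Rescaling layer defined above, mapping uint8 images from [0, 255] into [-1, 1] as the docstring describes; the image data is random and purely illustrative.
import numpy as np
from keras import layers, ops

# Rescale [0, 255] inputs into the [-1, 1] range.
rescale = layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)
images = np.random.randint(0, 256, size=(2, 32, 32, 3), dtype="uint8")
scaled = ops.convert_to_numpy(rescale(images))
print(scaled.min(), scaled.max())  # roughly -1.0 and 1.0
|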
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from docarray.typing import T
from docarray.document.strawberry_type import StrawberryDocument
class StrawberryMixin:
def to_strawberry_type(self) -> List['StrawberryDocument']:
"""Convert a DocumentArray object into a Pydantic model."""
return [d.to_strawberry_type() for d in self]
@classmethod
def from_strawberry_type(cls: Type['T'], model: List['StrawberryDocument']) -> 'T':
"""Convert a list of Strawberry into DocumentArray
:param model: the list of strawberry type objects that represents a DocumentArray
:return: a DocumentArray
"""
from docarray import Document
return cls(Document.from_strawberry_type(m) for m in model)
|
from typing import TYPE_CHECKING, Type, List
if TYPE_CHECKING:
from ...typing import T
from ...document.strawberry_type import StrawberryDocument
class StrawberryMixin:
def to_strawberry_type(self) -> List['StrawberryDocument']:
"""Convert a DocumentArray object into a Pydantic model."""
return [d.to_strawberry_type() for d in self]
@classmethod
def from_strawberry_type(cls: Type['T'], model: List['StrawberryDocument']) -> 'T':
"""Convert a list of Strawberry into DocumentArray
:param model: the list of strawberry type objects that represents a DocumentArray
:return: a DocumentArray
"""
from ... import Document
return cls(Document.from_strawberry_type(m) for m in model)
|
import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"exec",
"postgres-test",
"pg_isready",
"-U",
"postgres",
"-d",
"postgres",
],
check=True,
capture_output=True,
text=True,
)
if "accepting connections" in result.stdout:
print("PostgreSQL is ready.")
return True
except subprocess.CalledProcessError:
print(f"PostgreSQL is not ready yet. Retrying in {delay} seconds...")
time.sleep(delay)
print("Failed to connect to PostgreSQL.")
return False
def run_command(command, check=True):
try:
subprocess.run(command, check=check)
except subprocess.CalledProcessError as e:
print(f"Command failed: {e}")
sys.exit(1)
def test():
# Start PostgreSQL with Docker Compose
run_command(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"up",
"-d",
]
)
if not wait_for_postgres():
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(1)
# Run Prisma migrations
run_command(["prisma", "migrate", "dev"])
# Run the tests
result = subprocess.run(["pytest"] + sys.argv[1:], check=False)
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(result.returncode)
|
import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"exec",
"postgres-test",
"pg_isready",
"-U",
"postgres",
"-d",
"postgres",
],
check=True,
capture_output=True,
text=True,
)
if "accepting connections" in result.stdout:
print("PostgreSQL is ready.")
return True
except subprocess.CalledProcessError:
print(f"PostgreSQL is not ready yet. Retrying in {delay} seconds...")
time.sleep(delay)
print("Failed to connect to PostgreSQL.")
return False
def run_command(command, check=True):
try:
subprocess.run(command, check=check)
except subprocess.CalledProcessError as e:
print(f"Command failed: {e}")
sys.exit(1)
def test():
# Start PostgreSQL with Docker Compose
run_command(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"up",
"-d",
"postgres-test",
]
)
if not wait_for_postgres():
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(1)
# Run Prisma migrations
run_command(["prisma", "migrate", "dev"])
# Run the tests
result = subprocess.run(["pytest"] + sys.argv[1:], check=False)
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(result.returncode)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
"""Set runner's epoch information to the model."""
def before_train_epoch(self, runner):
epoch = runner.epoch
model = runner.model
if is_module_wrapper(model):
model = model.module
model.set_epoch(epoch)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.parallel import is_module_wrapper
from mmcv.runner import HOOKS, Hook
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
"""Set runner's epoch information to the model."""
def before_train_epoch(self, runner):
epoch = runner.epoch
model = runner.model
if is_module_wrapper(model):
model = model.module
model.set_epoch(epoch)
|
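A hedged sketch of how a hook like SetEpochInfoHook is typically enabled in a downstream MMDetection config; the snippet assumes the hook has been registered as above.
# In a config file, list the hook under custom_hooks so the runner invokes it
# before every training epoch.
custom_hooks = [dict(type='SetEpochInfoHook')]
|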
import os
import time
import uuid
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, 'v7/docker-compose.yml'))
compose_yml_v8 = os.path.abspath(os.path.join(cur_dir, 'v8/docker-compose.yml'))
@pytest.fixture(scope='module', autouse=True)
def start_storage_v7():
os.system(f"docker-compose -f {compose_yml_v7} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v7} down --remove-orphans")
@pytest.fixture(scope='module', autouse=True)
def start_storage_v8():
os.system(f"docker-compose -f {compose_yml_v8} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v8} down --remove-orphans")
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class FlatDoc(BaseDoc):
tens_one: NdArray = Field(dims=10)
tens_two: NdArray = Field(dims=50)
class NestedDoc(BaseDoc):
d: SimpleDoc
class DeepNestedDoc(BaseDoc):
d: NestedDoc
class MyImageDoc(ImageDoc):
embedding: NdArray = Field(dims=128)
@pytest.fixture(scope='function')
def ten_simple_docs():
return [SimpleDoc(tens=np.random.randn(10)) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_flat_docs():
return [
FlatDoc(tens_one=np.random.randn(10), tens_two=np.random.randn(50))
for _ in range(10)
]
@pytest.fixture(scope='function')
def ten_nested_docs():
return [NestedDoc(d=SimpleDoc(tens=np.random.randn(10))) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_deep_nested_docs():
return [
DeepNestedDoc(d=NestedDoc(d=SimpleDoc(tens=np.random.randn(10))))
for _ in range(10)
]
@pytest.fixture(scope='function')
def tmp_index_name():
return uuid.uuid4().hex
|
import os
import time
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, 'v7/docker-compose.yml'))
compose_yml_v8 = os.path.abspath(os.path.join(cur_dir, 'v8/docker-compose.yml'))
@pytest.fixture(scope='module', autouse=True)
def start_storage_v7():
os.system(f"docker-compose -f {compose_yml_v7} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v7} down --remove-orphans")
@pytest.fixture(scope='module', autouse=True)
def start_storage_v8():
os.system(f"docker-compose -f {compose_yml_v8} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v8} down --remove-orphans")
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class FlatDoc(BaseDoc):
tens_one: NdArray = Field(dims=10)
tens_two: NdArray = Field(dims=50)
class NestedDoc(BaseDoc):
d: SimpleDoc
class DeepNestedDoc(BaseDoc):
d: NestedDoc
class MyImageDoc(ImageDoc):
embedding: NdArray = Field(dims=128)
@pytest.fixture(scope='function')
def ten_simple_docs():
return [SimpleDoc(tens=np.random.randn(10)) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_flat_docs():
return [
FlatDoc(tens_one=np.random.randn(10), tens_two=np.random.randn(50))
for _ in range(10)
]
@pytest.fixture(scope='function')
def ten_nested_docs():
return [NestedDoc(d=SimpleDoc(tens=np.random.randn(10))) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_deep_nested_docs():
return [
DeepNestedDoc(d=NestedDoc(d=SimpleDoc(tens=np.random.randn(10))))
for _ in range(10)
]
|
import json
import os
from typing import List
import torch
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [1, 3, 5],
stride_sizes: List[int] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
model = CNN(**config)
model.load_state_dict(weights)
return model
|
import torch
from torch import nn
from typing import List
import os
import json
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [1, 3, 5],
stride_sizes: List[int] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
model = CNN(**config)
model.load_state_dict(weights)
return model
|
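A small, self-contained usage sketch for the CNN module above; the batch size, token count, and embedding dimension are arbitrary illustration values.
import torch

cnn = CNN(in_word_embedding_dimension=300, out_channels=64, kernel_sizes=[1, 3, 5])
features = {"token_embeddings": torch.randn(2, 8, 300)}  # (batch, tokens, embedding dim)
out = cnn(features)
print(out["token_embeddings"].shape)       # torch.Size([2, 8, 192])
print(cnn.get_word_embedding_dimension())  # 192 == 64 * len(kernel_sizes)
|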
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
        has the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
@staticmethod
def _parse_index_ids_from_bulk_info(
accumulated_info: List[Dict],
) -> Dict[str, List[int]]:
"""Parse ids from bulk info of failed send request to ES operation
:param accumulated_info: accumulated info of failed operation
:return: dict containing failed index ids of each operation type
"""
parsed_ids = {}
for info in accumulated_info:
for _op_type in info.keys():
if '_id' in info[_op_type]:
if _op_type not in parsed_ids:
parsed_ids[_op_type] = []
parsed_ids[_op_type].append(info[_op_type]['_id'])
return parsed_ids
def _upload_batch(self, docs: Iterable['Document'], **kwargs) -> List[int]:
requests = [self._document_to_elastic(doc) for doc in docs]
accumulated_info = self._send_requests(requests, **kwargs)
self._refresh(self._config.index_name)
successful_ids = self._parse_index_ids_from_bulk_info(accumulated_info)
if 'index' not in successful_ids:
return []
return successful_ids['index']
def _extend(self, docs: Iterable['Document'], **kwargs):
docs = list(docs)
successful_indexed_ids = self._upload_batch(docs, **kwargs)
self._offset2ids.extend(
[_id for _id in successful_indexed_ids if _id not in self._offset2ids.ids]
)
if len(successful_indexed_ids) != len(docs):
doc_ids = [doc.id for doc in docs]
failed_index_ids = set(doc_ids) - set(successful_indexed_ids)
err_msg = f'fail to add Documents with ids: {failed_index_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
|
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
        has the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
@staticmethod
def _parse_index_ids_from_bulk_info(
accumulated_info: List[Dict],
) -> Dict[str, List[int]]:
"""Parse ids from bulk info of failed send request to ES operation
:param accumulated_info: accumulated info of failed operation
:return: dict containing failed index ids of each operation type
"""
parsed_ids = {}
for info in accumulated_info:
for _op_type in info.keys():
if '_id' in info[_op_type]:
if _op_type not in parsed_ids:
parsed_ids[_op_type] = []
parsed_ids[_op_type].append(info[_op_type]['_id'])
return parsed_ids
def _upload_batch(self, docs: Iterable['Document'], **kwargs) -> List[int]:
requests = [self._document_to_elastic(doc) for doc in docs]
accumulated_info = self._send_requests(requests, **kwargs)
self._refresh(self._config.index_name)
successful_ids = self._parse_index_ids_from_bulk_info(accumulated_info)
if 'index' not in successful_ids:
return []
return successful_ids['index']
def _extend(self, docs: Iterable['Document'], **kwargs):
docs = list(docs)
successful_indexed_ids = self._upload_batch(docs, **kwargs)
self._offset2ids.extend(
[_id for _id in successful_indexed_ids if _id not in self._offset2ids.ids]
)
if len(successful_indexed_ids) != len(docs):
doc_ids = [doc.id for doc in docs]
failed_index_ids = set(doc_ids) - set(successful_indexed_ids)
err_msg = f'fail to add Documents with ids: {failed_index_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
|
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocVec
from docarray.typing import ImageUrl, NdArray
def test_optional():
class Features(BaseDoc):
tensor: NdArray[100]
class Image(BaseDoc):
url: ImageUrl
features: Optional[Features] = None
docs = DocVec[Image]([Image(url='http://url.com/foo.png') for _ in range(10)])
print(docs.features) # None
docs.features = [Features(tensor=np.random.random([100])) for _ in range(10)]
print(docs.features) # <DocVec[Features] (length=10)>
assert isinstance(docs.features, DocVec)
assert isinstance(docs.features[0], Features)
docs.features.tensor = np.ones((10, 100))
assert docs[0].features.tensor.shape == (100,)
docs.features = None
assert docs[0].features is None
|
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocVec
from docarray.typing import ImageUrl, NdArray
def test_optional():
class Features(BaseDoc):
tensor: NdArray[100]
class Image(BaseDoc):
url: ImageUrl
features: Optional[Features] = None
docs = DocVec[Image]([Image(url='http://url.com/foo.png') for _ in range(10)])
print(docs.features) # None
docs.features = [Features(tensor=np.random.random([100])) for _ in range(10)]
print(docs.features) # <DocVec[Features] (length=10)>
assert isinstance(docs.features, DocVec[Features])
docs.features.tensor = np.ones((10, 100))
assert docs[0].features.tensor.shape == (100,)
docs.features = None
assert docs[0].features is None
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'target',
type=str,
choices=['flow', 'executor', 'gateway'],
help='The target type to ping. For `executor` and `gateway`, checks the readiness of the individual service. '
'For `flow` it checks the connectivity of the complete microservice architecture.',
default='executor',
)
parser.add_argument(
'host',
type=str,
help='The host address with port of a target Executor, Gateway or a Flow, e.g. 0.0.0.0:8000. For Flow or Gateway, host can also indicate the protocol, grpc will be used if not provided, e.g http://0.0.0.0:8000',
)
parser.add_argument(
'--timeout',
type=int,
default=3000,
help='''
Timeout in millisecond of one check
-1 for waiting forever
''',
)
parser.add_argument(
'--attempts',
type=int,
default=1,
help='The number of readiness checks to perform',
)
parser.add_argument(
'--min-successful-attempts',
type=int,
default=1,
help='The minimum number of successful readiness checks, before exiting successfully with exit(0)',
)
return parser
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'target',
type=str,
choices=['flow', 'executor'],
help='The target type to ping',
default='executor',
)
parser.add_argument(
'host',
type=str,
help='The host address with port of a target Executor or a Flow, e.g. 0.0.0.0:8000',
)
parser.add_argument(
'--timeout',
type=int,
default=3000,
help='''
Timeout in millisecond of one check
-1 for waiting forever
''',
)
parser.add_argument(
'--retries',
type=int,
default=3,
        help='The maximum number of health checks to attempt before exiting with exit code 1',
)
return parser
|
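A quick sketch of how the ping parser above can be exercised on its own, assuming jina is installed; the host and flag values are placeholders.
parser = set_ping_parser()
args = parser.parse_args(['executor', '0.0.0.0:8000', '--timeout', '5000', '--retries', '5'])
print(args.target, args.host, args.timeout, args.retries)
|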
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from ._tags import (
ClassifierTags,
InputTags,
RegressorTags,
Tags,
TargetTags,
TransformerTags,
default_tags,
get_tags,
)
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
# TODO(1.7): remove parallel_backend and register_parallel_backend
msg = "deprecated in 1.5 to be removed in 1.7. Use joblib.{} instead."
register_parallel_backend = deprecated(msg)(_joblib.register_parallel_backend)
# if a class, deprecated will change the object in _joblib module so we need to subclass
@deprecated(msg)
class parallel_backend(_joblib.parallel_backend):
pass
__all__ = [
"murmurhash3_32",
"as_float_array",
"assert_all_finite",
"check_array",
"check_random_state",
"compute_class_weight",
"compute_sample_weight",
"column_or_1d",
"check_consistent_length",
"check_X_y",
"check_scalar",
"indexable",
"check_symmetric",
"deprecated",
"parallel_backend",
"register_parallel_backend",
"resample",
"shuffle",
"all_estimators",
"DataConversionWarning",
"estimator_html_repr",
"Bunch",
"metadata_routing",
"safe_sqr",
"safe_mask",
"gen_batches",
"gen_even_slices",
"Tags",
"InputTags",
"TargetTags",
"ClassifierTags",
"RegressorTags",
"TransformerTags",
"default_tags",
"get_tags",
]
# TODO(1.7): remove
def __getattr__(name):
if name == "IS_PYPY":
warnings.warn(
"IS_PYPY is deprecated and will be removed in 1.7.",
FutureWarning,
)
return platform.python_implementation() == "PyPy"
raise AttributeError(f"module {__name__} has no attribute {name}")
# TODO(1.7): remove tosequence
@deprecated("tosequence was deprecated in 1.5 and will be removed in 1.7")
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
The iterable to be converted.
Returns
-------
x : Sequence
        If `x` is a NumPy array, it is returned as an `ndarray`. If `x`
        is a `Sequence`, `x` is returned as-is. If `x` is of any other
        type, `x` is returned cast as a list.
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
|
"""Various utilities to help with development."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import platform
import warnings
from collections.abc import Sequence
import numpy as np
from ..exceptions import DataConversionWarning
from . import _joblib, metadata_routing
from ._bunch import Bunch
from ._chunking import gen_batches, gen_even_slices
from ._estimator_html_repr import estimator_html_repr
# Make _safe_indexing importable from here for backward compat as this particular
# helper is considered semi-private and typically very useful for third-party
# libraries that want to comply with scikit-learn's estimator API. In particular,
# _safe_indexing was included in our public API documentation despite the leading
# `_` in its name.
from ._indexing import (
_safe_indexing, # noqa
resample,
shuffle,
)
from ._mask import safe_mask
from .class_weight import compute_class_weight, compute_sample_weight
from .deprecation import deprecated
from .discovery import all_estimators
from .extmath import safe_sqr
from .murmurhash import murmurhash3_32
from .validation import (
as_float_array,
assert_all_finite,
check_array,
check_consistent_length,
check_random_state,
check_scalar,
check_symmetric,
check_X_y,
column_or_1d,
indexable,
)
# TODO(1.7): remove parallel_backend and register_parallel_backend
msg = "deprecated in 1.5 to be removed in 1.7. Use joblib.{} instead."
register_parallel_backend = deprecated(msg)(_joblib.register_parallel_backend)
# if a class, deprecated will change the object in _joblib module so we need to subclass
@deprecated(msg)
class parallel_backend(_joblib.parallel_backend):
pass
__all__ = [
"murmurhash3_32",
"as_float_array",
"assert_all_finite",
"check_array",
"check_random_state",
"compute_class_weight",
"compute_sample_weight",
"column_or_1d",
"check_consistent_length",
"check_X_y",
"check_scalar",
"indexable",
"check_symmetric",
"deprecated",
"parallel_backend",
"register_parallel_backend",
"resample",
"shuffle",
"all_estimators",
"DataConversionWarning",
"estimator_html_repr",
"Bunch",
"metadata_routing",
"safe_sqr",
"safe_mask",
"gen_batches",
"gen_even_slices",
]
# TODO(1.7): remove
def __getattr__(name):
if name == "IS_PYPY":
warnings.warn(
"IS_PYPY is deprecated and will be removed in 1.7.",
FutureWarning,
)
return platform.python_implementation() == "PyPy"
raise AttributeError(f"module {__name__} has no attribute {name}")
# TODO(1.7): remove tosequence
@deprecated("tosequence was deprecated in 1.5 and will be removed in 1.7")
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
The iterable to be converted.
Returns
-------
x : Sequence
        If `x` is a NumPy array, it is returned as an `ndarray`. If `x`
        is a `Sequence`, `x` is returned as-is. If `x` is of any other
        type, `x` is returned cast as a list.
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
|
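A tiny illustration of the tosequence behaviour documented above (the call still emits the deprecation warning); the inputs are arbitrary.
import numpy as np

print(tosequence(np.arange(3)))          # a NumPy array -> returned as an ndarray
print(tosequence((1, 2, 3)))             # a Sequence -> returned as-is
print(tosequence(x for x in range(3)))   # any other iterable -> cast to a list
|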
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import os
import time
from jina import Executor, requests, DocumentArray
class SlowProcessExecutor(Executor):
def __init__(self, time_sleep=1.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.time_sleep = time_sleep
@requests
def process(self, docs: DocumentArray, *args, **kwargs):
time.sleep(self.time_sleep)
for doc in docs:
doc.tags['replica_uid'] = os.environ['POD_UID']
doc.tags['time'] = time.time()
return docs
|
import os
import time
from jina import Executor, requests, DocumentArray
class SlowProcessExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from jina.logging.logger import JinaLogger
self.logger = JinaLogger(self.__class__.__name__)
@requests
def process(self, docs: DocumentArray, *args, **kwargs):
time.sleep(1.0)
for doc in docs:
doc.tags['replica_uid'] = os.environ['POD_UID']
doc.tags['time'] = time.time()
return docs
|
import os
import grpc
import pytest
from jina import Flow, __default_host__
from jina.clients import Client
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway.grpc import GRPCGateway
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime
from jina.serve.runtimes.helper import _get_grpc_server_options
from tests import random_docs
@pytest.fixture(scope='function')
def flow_with_grpc(monkeypatch):
class AuthInterceptor(grpc.aio.ServerInterceptor):
def __init__(self, key):
self._valid_metadata = ('rpc-auth-header', key)
def deny(_, context):
context.abort(grpc.StatusCode.UNAUTHENTICATED, 'Invalid key')
self._deny = grpc.unary_unary_rpc_method_handler(deny)
async def intercept_service(self, continuation, handler_call_details):
meta = handler_call_details.invocation_metadata
metas_dicts = {m.key: m.value for m in meta}
assert 'rpc-auth-header' in metas_dicts
assert (
metas_dicts['rpc-auth-header'] == 'access_key'
), f'Invalid access key detected, got {metas_dicts["rpc-auth-header"]}'
for m in meta:
if m == self._valid_metadata:
return await continuation(handler_call_details)
return self._deny
class AlternativeGRPCGateway(GRPCGateway):
def __init__(self, *args, **kwargs):
super(AlternativeGRPCGateway, self).__init__(*args, **kwargs)
self.server = grpc.aio.server(
interceptors=(AuthInterceptor('access_key'),),
options=_get_grpc_server_options(self.grpc_server_options),
)
class AlternativeGRPCGatewayRuntime(_GRPCGatewayRuntime):
async def async_setup(self):
"""
The async method to setup.
Create the gRPC server and expose the port for communication.
"""
if not self.args.proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
if not (is_port_free(__default_host__, self.args.port)):
raise PortAlreadyUsed(f'port:{self.args.port}')
self.gateway = AlternativeGRPCGateway(
name=self.name,
grpc_server_options=self.args.grpc_server_options,
port=self.args.port,
ssl_keyfile=self.args.ssl_keyfile,
ssl_certfile=self.args.ssl_certfile,
)
self.gateway.set_streamer(
args=self.args,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
runtime_name=self.name,
)
await self.gateway.setup_server()
monkeypatch.setattr(
'jina.serve.runtimes.gateway.grpc.GRPCGatewayRuntime',
AlternativeGRPCGatewayRuntime,
)
return Flow(protocol='grpc').add()
def test_client_grpc_kwargs(flow_with_grpc):
with flow_with_grpc:
client = Client(
port=flow_with_grpc.port,
host='localhost',
protocol='grpc',
)
meta_data = (('rpc-auth-header', 'invalid_access_key'),)
try:
client.post('', random_docs(1), request_size=1, metadata=meta_data)
except Exception as exc:
assert 'Invalid access key detected, got invalid_access_key' in repr(exc)
|
import os
import grpc
import pytest
from jina import Flow, __default_host__
from jina.clients import Client
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime
from jina.serve.runtimes.helper import _get_grpc_server_options
from tests import random_docs
@pytest.fixture(scope='function')
def flow_with_grpc(monkeypatch):
class AuthInterceptor(grpc.aio.ServerInterceptor):
def __init__(self, key):
self._valid_metadata = ('rpc-auth-header', key)
def deny(_, context):
context.abort(grpc.StatusCode.UNAUTHENTICATED, 'Invalid key')
self._deny = grpc.unary_unary_rpc_method_handler(deny)
async def intercept_service(self, continuation, handler_call_details):
meta = handler_call_details.invocation_metadata
metas_dicts = {m.key: m.value for m in meta}
assert 'rpc-auth-header' in metas_dicts
assert (
metas_dicts['rpc-auth-header'] == 'access_key'
), f'Invalid access key detected, got {metas_dicts["rpc-auth-header"]}'
for m in meta:
if m == self._valid_metadata:
return await continuation(handler_call_details)
return self._deny
class AlternativeGRPCGatewayRuntime(_GRPCGatewayRuntime):
async def async_setup(self):
"""
The async method to setup.
Create the gRPC server and expose the port for communication.
"""
if not self.args.proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
if not (is_port_free(__default_host__, self.args.port)):
raise PortAlreadyUsed(f'port:{self.args.port}')
self.server = grpc.aio.server(
interceptors=(AuthInterceptor('access_key'),),
options=_get_grpc_server_options(self.args.grpc_server_options),
)
await self._async_setup_server()
monkeypatch.setattr(
'jina.serve.runtimes.gateway.grpc.GRPCGatewayRuntime',
AlternativeGRPCGatewayRuntime,
)
return Flow(protocol='grpc').add()
def test_client_grpc_kwargs(flow_with_grpc):
with flow_with_grpc:
client = Client(
port=flow_with_grpc.port,
host='localhost',
protocol='grpc',
)
meta_data = (('rpc-auth-header', 'invalid_access_key'),)
try:
client.post('', random_docs(1), request_size=1, metadata=meta_data)
except Exception as exc:
assert 'Invalid access key detected, got invalid_access_key' in repr(exc)
|
_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='PISARoIHead',
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
train_cfg=dict(
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
sampler=dict(
type='ScoreHLRSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2, bias=0),
carl=dict(k=1, bias=0.2))),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
roi_head=dict(
type='PISARoIHead',
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
train_cfg=dict(
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
sampler=dict(
type='ScoreHLRSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2, bias=0),
carl=dict(k=1, bias=0.2))),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import torch
class TorchFormatter(Formatter[dict, "torch.Tensor", dict]):
def __init__(self, features=None, decoded=True, **torch_tensor_kwargs):
super().__init__(features=features, decoded=decoded)
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> dict:
row = self.numpy_arrow_extractor().extract_row(pa_table)
if self.decoded:
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
if self.decoded:
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
if self.decoded:
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import torch
class TorchFormatter(Formatter[dict, "torch.Tensor", dict]):
def __init__(self, features=None, decoded=True, **torch_tensor_kwargs):
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _tensorize(self, value):
import torch
default_dtype = {}
if np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
elif np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, (list, np.ndarray)):
data_struct = np.array(data_struct, copy=False)
            if data_struct.dtype == object:  # pytorch tensors cannot be instantiated from an array of objects
return [self.recursive_tensorize(substruct) for substruct in data_struct]
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> dict:
row = self.numpy_arrow_extractor().extract_row(pa_table)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
col = self.numpy_arrow_extractor().extract_column(pa_table)
return self.recursive_tensorize(col)
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
return self.recursive_tensorize(batch)
|
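A minimal sketch of the dtype-defaulting rule used by _tensorize above, kept independent of the datasets internals; the helper name and sample arrays are illustrative only.
import numpy as np
import torch

def tensorize_like_formatter(value, **torch_tensor_kwargs):
    # Mirror TorchFormatter._tensorize: integer arrays default to int64 tensors,
    # floating arrays default to float32, and explicit kwargs win.
    default_dtype = {}
    if np.issubdtype(value.dtype, np.integer):
        default_dtype = {"dtype": torch.int64}
    elif np.issubdtype(value.dtype, np.floating):
        default_dtype = {"dtype": torch.float32}
    return torch.tensor(value, **{**default_dtype, **torch_tensor_kwargs})

print(tensorize_like_formatter(np.array([1, 2, 3])).dtype)                    # torch.int64
print(tensorize_like_formatter(np.array([1.0, 2.0])).dtype)                   # torch.float32
print(tensorize_like_formatter(np.array([1.0]), dtype=torch.float16).dtype)   # torch.float16
|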
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotCompanyBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
company_data: dict = SchemaField(
description="Company data for create/update operations",
default_factory=dict,
)
domain: str = SchemaField(
description="Company domain for get/update operations", default=""
)
class Output(BlockSchema):
company: dict = SchemaField(description="Company information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="3ae02219-d540-47cd-9c78-3ad6c7d9820a",
description="Manages HubSpot companies - create, update, and retrieve company information",
categories={BlockCategory.CRM},
input_schema=HubSpotCompanyBlock.Input,
output_schema=HubSpotCompanyBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/companies"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = requests.post(
base_url, headers=headers, json={"properties": input_data.company_data}
)
result = response.json()
yield "company", result
yield "status", "created"
elif input_data.operation == "get":
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
}
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "company", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
# First get company ID by domain
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
},
)
company_id = search_response.json().get("results", [{}])[0].get("id")
if company_id:
response = requests.patch(
f"{base_url}/{company_id}",
headers=headers,
json={"properties": input_data.company_data},
)
result = response.json()
yield "company", result
yield "status", "updated"
else:
yield "company", {}
yield "status", "company_not_found"
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotCompanyBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
company_data: dict = SchemaField(
description="Company data for create/update operations", default={}
)
domain: str = SchemaField(
description="Company domain for get/update operations", default=""
)
class Output(BlockSchema):
company: dict = SchemaField(description="Company information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="3ae02219-d540-47cd-9c78-3ad6c7d9820a",
description="Manages HubSpot companies - create, update, and retrieve company information",
categories={BlockCategory.CRM},
input_schema=HubSpotCompanyBlock.Input,
output_schema=HubSpotCompanyBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/companies"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = requests.post(
base_url, headers=headers, json={"properties": input_data.company_data}
)
result = response.json()
yield "company", result
yield "status", "created"
elif input_data.operation == "get":
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
}
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "company", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
# First get company ID by domain
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
},
)
company_id = search_response.json().get("results", [{}])[0].get("id")
if company_id:
response = requests.patch(
f"{base_url}/{company_id}",
headers=headers,
json={"properties": input_data.company_data},
)
result = response.json()
yield "company", result
yield "status", "updated"
else:
yield "company", {}
yield "status", "company_not_found"
|
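A hedged sketch of the HubSpot CRM search payload that the "get" branch above sends; the domain is a placeholder and no HTTP request is made here.
import json

domain = "example.com"  # placeholder
search_payload = {
    "filterGroups": [
        {"filters": [{"propertyName": "domain", "operator": "EQ", "value": domain}]}
    ]
}
# This body is POSTed to https://api.hubapi.com/crm/v3/objects/companies/search
# with a Bearer token in the Authorization header (see run() above).
print(json.dumps(search_payload, indent=2))
|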
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
def create_openai_tools_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
strict: Optional[bool] = None,
) -> Runnable:
"""Create an agent that uses OpenAI tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
Here's an example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
MessagesPlaceholder("chat_history", optional=True),
("human", "{input}"),
MessagesPlaceholder("agent_scratchpad"),
]
)
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
msg = f"Prompt missing required variables: {missing_vars}"
raise ValueError(msg)
llm_with_tools = llm.bind(
tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools]
)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
)
)
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
return agent
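# A tiny sketch of the prompt validation performed above: the prompt's declared
# input variables are checked against the required {"agent_scratchpad"} set, and
# any missing name triggers the ValueError. The variable names below are
# illustrative stand-ins for prompt.input_variables / prompt.partial_variables.
required = {"agent_scratchpad"}
declared = ["input", "chat_history"]   # e.g. prompt.input_variables
partials: dict = {}                    # e.g. prompt.partial_variables
missing = required.difference(declared + list(partials))
print(missing)  # {'agent_scratchpad'} -> the function above would raise ValueError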
|
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
def create_openai_tools_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
strict: Optional[bool] = None,
) -> Runnable:
"""Create an agent that uses OpenAI tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Raises:
ValueError: If the prompt is missing required variables.
Example:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
prompt = hub.pull("hwchase17/openai-tools-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_tools_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
Here's an example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
MessagesPlaceholder("chat_history", optional=True),
("human", "{input}"),
MessagesPlaceholder("agent_scratchpad"),
]
)
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
llm_with_tools = llm.bind(
tools=[convert_to_openai_tool(tool, strict=strict) for tool in tools]
)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
)
)
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
return agent
|
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_image, decode_jpeg, decode_webp, read_file
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = decode_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = decode_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
img_webp = decode_image(str(SCRIPT_DIR / "assets/fakedata/logos/rgb_pytorch.webp"))
if img_webp.shape != (3, 100, 100):
raise RuntimeError(f"Unexpected shape of img_webp: {img_webp.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = decode_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights, progress=False).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
print(f"{torch.ops.image._jpeg_version() = }")
if not torch.ops.image._is_compiled_against_turbo():
msg = "Torchvision wasn't compiled against libjpeg-turbo"
if os.getenv("IS_M1_CONDA_BUILD_JOB") == "1":
# When building the conda package on M1, it's difficult to enforce
# that we build against turbo due to interactions with the libwebp
# package. So we just accept it, instead of raising an error.
print(msg)
else:
raise ValueError(msg)
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
# TODO: remove once pytorch/pytorch#110436 is resolved
    # Temporarily disabling compile test until triton with manylinux2014 is available
# if sys.version_info < (3, 12, 0):
# smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_image, decode_jpeg, decode_webp, read_file
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = decode_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = decode_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
img_webp = decode_image(str(SCRIPT_DIR / "assets/fakedata/logos/rgb_pytorch.webp"))
if img_webp.shape != (3, 100, 100):
raise RuntimeError(f"Unexpected shape of img_webp: {img_webp.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = decode_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights, progress=False).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
print(f"{torch.ops.image._jpeg_version() = }")
if not torch.ops.image._is_compiled_against_turbo():
msg = "Torchvision wasn't compiled against libjpeg-turbo"
if os.getenv("IS_M1_CONDA_BUILD_JOB") == "1":
# When building the conda package on M1, it's difficult to enforce
# that we build against turbo due to interactions with the libwebp
# package. So we just accept it, instead of raising an error.
print(msg)
else:
raise ValueError(msg)
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
# TODO: remove once pytorch/pytorch#110436 is resolved
if sys.version_info < (3, 12, 0):
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ExtractHyperlinksTool
from langchain_community.tools.playwright.extract_hyperlinks import (
ExtractHyperlinksToolInput,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ExtractHyperlinksToolInput": (
"langchain_community.tools.playwright.extract_hyperlinks"
),
"ExtractHyperlinksTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ExtractHyperlinksTool",
"ExtractHyperlinksToolInput",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ExtractHyperlinksTool
from langchain_community.tools.playwright.extract_hyperlinks import (
ExtractHyperlinksToolInput,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ExtractHyperlinksToolInput": (
"langchain_community.tools.playwright.extract_hyperlinks"
),
"ExtractHyperlinksTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ExtractHyperlinksToolInput",
"ExtractHyperlinksTool",
]
|
# coding=utf-8
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class ShieldGemma2Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ShieldGemma2ForImageClassification`]. It is used to instantiate an
ShieldGemma2ForImageClassification according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the shieldgemma-2-4b-it.
e.g. [google/gemma-3-4b](https://huggingface.co/google/gemma-3-4b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`Union[ShieldGemma2TextConfig, dict]`, *optional*):
The config object of the text backbone.
vision_config (`Union[AutoConfig, dict]`, *optional*):
Custom vision config or dict.
mm_tokens_per_image (`int`, *optional*, defaults to 256):
The number of tokens per image embedding.
boi_token_index (`int`, *optional*, defaults to 255999):
The begin-of-image token index to wrap the image prompt.
eoi_token_index (`int`, *optional*, defaults to 256000):
The end-of-image token index to wrap the image prompt.
image_token_index (`int`, *optional*, defaults to 262144):
The image token index to encode the image prompt.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import ShieldGemma2ForConditionalGeneration, ShieldGemma2Config, SiglipVisionConfig, ShieldGemma2TextConfig
>>> # Initializing a Siglip-like vision config
>>> vision_config = SiglipVisionConfig()
>>> # Initializing a ShieldGemma2 Text config
>>> text_config = ShieldGemma2TextConfig()
>>> # Initializing a ShieldGemma2 gemma-3-4b style configuration
>>> configuration = ShieldGemma2Config(vision_config, text_config)
>>> # Initializing a model from the gemma-3-4b style configuration
    >>> model = ShieldGemma2ForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "shieldgemma2"
attribute_map = {
"image_token_id": "image_token_index",
"boi_token_id": "boi_token_index",
"eoi_token_id": "eoi_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
text_config=None,
vision_config=None,
mm_tokens_per_image: int = 256,
boi_token_index: int = 255_999,
eoi_token_index: int = 256_000,
image_token_index: int = 262_144,
initializer_range: float = 0.02,
**kwargs,
):
if isinstance(vision_config, dict):
vision_config["model_type"] = (
vision_config["model_type"] if "model_type" in vision_config else "siglip_vision_model"
)
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip_vision_model"]()
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "gemma3_text"
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["gemma3_text"]()
self.text_config = text_config
self.vision_config = vision_config
self.mm_tokens_per_image = mm_tokens_per_image
self.boi_token_index = boi_token_index
self.eoi_token_index = eoi_token_index
self.image_token_index = image_token_index
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["ShieldGemma2Config"]
|
# coding=utf-8
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class ShieldGemma2Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ShieldGemma2ForImageClassification`]. It is used to instantiate an
ShieldGemma2ForImageClassification according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the shieldgemma-2-4b-it.
e.g. [google/gemma-3-4b](https://huggingface.co/google/gemma-3-4b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`Union[ShieldGemma2TextConfig, dict]`, *optional*):
The config object of the text backbone.
vision_config (`Union[AutoConfig, dict]`, *optional*):
Custom vision config or dict.
mm_tokens_per_image (`int`, *optional*, defaults to 256):
The number of tokens per image embedding.
boi_token_index (`int`, *optional*, defaults to 255999):
The begin-of-image token index to wrap the image prompt.
eoi_token_index (`int`, *optional*, defaults to 256000):
The end-of-image token index to wrap the image prompt.
image_token_index (`int`, *optional*, defaults to 262144):
The image token index to encode the image prompt.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import ShieldGemma2ForConditionalGeneration, ShieldGemma2Config, SiglipVisionConfig, ShieldGemma2TextConfig
>>> # Initializing a Siglip-like vision config
>>> vision_config = SiglipVisionConfig()
>>> # Initializing a ShieldGemma2 Text config
>>> text_config = ShieldGemma2TextConfig()
>>> # Initializing a ShieldGemma2 gemma-3-4b style configuration
>>> configuration = ShieldGemma2Config(vision_config, text_config)
>>> # Initializing a model from the gemma-3-4b style configuration
    >>> model = ShieldGemma2ForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "shieldgemma2"
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
text_config=None,
vision_config=None,
mm_tokens_per_image: int = 256,
boi_token_index: int = 255_999,
eoi_token_index: int = 256_000,
image_token_index: int = 262_144,
initializer_range: float = 0.02,
**kwargs,
):
if isinstance(vision_config, dict):
vision_config["model_type"] = (
vision_config["model_type"] if "model_type" in vision_config else "siglip_vision_model"
)
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip_vision_model"]()
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "gemma3_text"
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["gemma3_text"]()
self.text_config = text_config
self.vision_config = vision_config
self.mm_tokens_per_image = mm_tokens_per_image
self.boi_token_index = boi_token_index
self.eoi_token_index = eoi_token_index
self.image_token_index = image_token_index
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["ShieldGemma2Config"]
|
import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
async def test_request_streamer(prefetch, num_requests, async_iterator):
requests_handled = []
results_handled = []
def request_handler_fn(request):
requests_handled.append(request)
async def task():
await asyncio.sleep(0.5)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(result):
results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn():
        # with a sync generator, iteration of the input requests finishes before all results are handled
assert len(requests_handled) == num_requests
assert len(results_handled) <= num_requests
def _yield_data_request():
req = DataRequest()
req.header.request_id = random_identity()
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
async def _get_async_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
await asyncio.sleep(0.1)
args = Namespace()
args.prefetch = prefetch
streamer = RequestStreamer(
args=args,
request_handler=request_handler_fn,
result_handler=result_handle_fn,
end_of_iter_handler=end_of_iter_fn,
)
it = (
_get_async_requests_iterator(num_requests)
if async_iterator
else _get_sync_requests_iterator(num_requests)
)
response = streamer.stream(it)
num_responses = 0
async for r in response:
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
|
import asyncio
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
async def test_request_streamer(prefetch, num_requests, async_iterator):
requests_handled = []
results_handled = []
def request_handler_fn(request):
requests_handled.append(request)
async def task():
await asyncio.sleep(0.5)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future
def result_handle_fn(result):
results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn():
        # with a sync generator, iteration of the input requests finishes before all results are handled
assert len(requests_handled) == num_requests
assert len(results_handled) <= num_requests
def _yield_data_request():
req = DataRequest()
req.header.request_id = random_identity()
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
async def _get_async_requests_iterator(num_requests):
for i in range(num_requests):
yield _yield_data_request()
await asyncio.sleep(0.1)
args = Namespace()
args.prefetch = prefetch
streamer = RequestStreamer(
args=args,
request_handler=request_handler_fn,
result_handler=result_handle_fn,
end_of_iter_handler=end_of_iter_fn,
)
it = (
_get_async_requests_iterator(num_requests)
if async_iterator
else _get_sync_requests_iterator(num_requests)
)
response = streamer.stream(it)
num_responses = 0
async for r in response:
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
|
import re
import torch
from torch.utils.hipify.hipify_python import PYTORCH_MAP, PYTORCH_TRIE
# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like:
# "...
# from ..codecache import CudaKernelParamCache
# ..."
# In such cases, we do not need to hipify_torch the original class/file name in codegen/codecache
def maybe_hipify_code_wrapper(source_codes: str, force_hipify: bool = False) -> str:
if torch.version.hip is None and not force_hipify:
return source_codes
def c2_repl(m: re.Match[str]) -> object:
return PYTORCH_MAP[m.group(0)]
# We need to redefine RE_PYTORCH_PREPROCESSOR here since in hipify_torch,
# it will apply positive lookbehind (?<=\W) to the pattern to avoid matching
# keyword at the beginning of code line. However, this can happen in codegen,
# which will cause the pattern to not match.
    # Note that lookahead (?=\W) is still needed to keep hipification idempotent, for example
# we need to skip replacing "getStreamFromExternal" in "getStreamFromExternalMasqueradingAsCUDA"
RE_PYTORCH_PREPROCESSOR = re.compile(rf"({PYTORCH_TRIE.export_to_regex()})(?=\W)")
source_codes = RE_PYTORCH_PREPROCESSOR.sub(c2_repl, source_codes) # type: ignore[arg-type]
return source_codes
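# Illustrative sketch of why the lookbehind is dropped above: a pattern that
# requires a non-word character *before* the keyword cannot match when the
# keyword starts the line, while the lookahead-only variant still matches.
# "cudaMalloc" is just a stand-in token; the real trie covers the full API.
line = "cudaMalloc(&ptr, size);"
with_lookbehind = re.compile(r"(?<=\W)(cudaMalloc)(?=\W)")
lookahead_only = re.compile(r"(cudaMalloc)(?=\W)")
assert with_lookbehind.search(line) is None      # misses: nothing precedes the keyword
assert lookahead_only.search(line) is not None   # matches at the start of the line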
|
import re
import torch
from torch.utils.hipify.hipify_python import PYTORCH_MAP, PYTORCH_TRIE
# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like:
# "...
# from ..codecache import CudaKernelParamCache
# ..."
# In such cases, we do not need to hipify_torch the original class/file name in codegen/codecache
def maybe_hipify_code_wrapper(source_codes: str, force_hipify: bool = False) -> str:
if torch.version.hip is None and not force_hipify:
return source_codes
def c2_repl(m: re.Match[str]) -> object:
return PYTORCH_MAP[m.group(0)]
# We need to redefine RE_PYTORCH_PREPROCESSOR here since in hipify_torch,
# it will apply positive lookbehind (?<=\W) to the pattern to avoid matching
# keyword at the beginning of code line. However, this can happen in codegen,
# which will cause the pattern to not match.
    # Note that lookahead (?=\W) is still needed to keep hipification idempotent, for example
# we need to skip replacing "getStreamFromExternal" in "getStreamFromExternalMasqueradingAsCUDA"
RE_PYTORCH_PREPROCESSOR = re.compile(rf"({PYTORCH_TRIE.export_to_regex()})(?=\W)")
source_codes = RE_PYTORCH_PREPROCESSOR.sub(c2_repl, source_codes) # type: ignore[arg-type]
return source_codes
|
"""Graph Database Cypher Reader."""
from typing import Dict, List, Optional
import yaml
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GraphDBCypherReader(BaseReader):
"""
Graph database Cypher reader.
Combines all Cypher query results into the Document type used by LlamaIndex.
Args:
uri (str): Graph Database URI
username (str): Username
password (str): Password
"""
def __init__(self, uri: str, username: str, password: str, database: str) -> None:
"""Initialize with parameters."""
try:
from neo4j import GraphDatabase, basic_auth
except ImportError:
raise ImportError(
"`neo4j` package not found, please run `pip install neo4j`"
)
        if not uri:
            raise ValueError("`uri` must be provided.")
        self.client = GraphDatabase.driver(
            uri=uri, auth=basic_auth(username, password)
        )
self.database = database
def load_data(
self, query: str, parameters: Optional[Dict] = None
) -> List[Document]:
"""
Run the Cypher with optional parameters and turn results into documents.
Args:
query (str): Graph Cypher query string.
parameters (Optional[Dict]): optional query parameters.
Returns:
List[Document]: A list of documents.
"""
if parameters is None:
parameters = {}
records, summary, keys = self.client.execute_query(
query, parameters, database_=self.database
)
return [Document(text=yaml.dump(entry.data())) for entry in records]
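# Illustrative sketch of the record-to-Document step above: each query record's
# dict is YAML-dumped into the Document text, one Document per result row. The
# record contents here are made up; real records come from execute_query().
_fake_record = {"p.name": "Ada", "m.title": "The Imitation Game"}
_example_doc = Document(text=yaml.dump(_fake_record))
print(_example_doc.text)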
|
"""Graph Database Cypher Reader."""
from typing import Dict, List, Optional
import yaml
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GraphDBCypherReader(BaseReader):
"""Graph database Cypher reader.
Combines all Cypher query results into the Document type used by LlamaIndex.
Args:
uri (str): Graph Database URI
username (str): Username
password (str): Password
"""
def __init__(self, uri: str, username: str, password: str, database: str) -> None:
"""Initialize with parameters."""
try:
from neo4j import GraphDatabase, basic_auth
except ImportError:
raise ImportError(
"`neo4j` package not found, please run `pip install neo4j`"
)
        if not uri:
            raise ValueError("`uri` must be provided.")
        self.client = GraphDatabase.driver(
            uri=uri, auth=basic_auth(username, password)
        )
self.database = database
def load_data(
self, query: str, parameters: Optional[Dict] = None
) -> List[Document]:
"""Run the Cypher with optional parameters and turn results into documents.
Args:
query (str): Graph Cypher query string.
parameters (Optional[Dict]): optional query parameters.
Returns:
List[Document]: A list of documents.
"""
if parameters is None:
parameters = {}
records, summary, keys = self.client.execute_query(
query, parameters, database_=self.database
)
return [Document(text=yaml.dump(entry.data())) for entry in records]
|
from typing import Any, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import MockerFixture
from syrupy.assertion import SnapshotAssertion
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
class FakeChatOpenAI(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
def _generate(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
},
},
),
),
],
)
def test_openai_functions_router(
snapshot: SnapshotAssertion,
mocker: MockerFixture,
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f"Revised draft: no more {kw['notes']}!",
)
accept = mocker.Mock(side_effect=lambda kw: f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
|
from typing import Any, Optional
from langchain_core.callbacks.manager import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from pytest_mock import MockerFixture
from syrupy.assertion import SnapshotAssertion
from langchain.runnables.openai_functions import OpenAIFunctionsRouter
class FakeChatOpenAI(BaseChatModel):
@property
def _llm_type(self) -> str:
return "fake-openai-chat-model"
def _generate(
self,
messages: list[BaseMessage],
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
return ChatResult(
generations=[
ChatGeneration(
message=AIMessage(
content="",
additional_kwargs={
"function_call": {
"name": "accept",
"arguments": '{\n "draft": "turtles"\n}',
}
},
)
)
]
)
def test_openai_functions_router(
snapshot: SnapshotAssertion, mocker: MockerFixture
) -> None:
revise = mocker.Mock(
side_effect=lambda kw: f"Revised draft: no more {kw['notes']}!"
)
accept = mocker.Mock(side_effect=lambda kw: f"Accepted draft: {kw['draft']}!")
router = OpenAIFunctionsRouter(
{
"revise": revise,
"accept": accept,
},
functions=[
{
"name": "revise",
"description": "Sends the draft for revision.",
"parameters": {
"type": "object",
"properties": {
"notes": {
"type": "string",
"description": "The editor's notes to guide the revision.",
},
},
},
},
{
"name": "accept",
"description": "Accepts the draft.",
"parameters": {
"type": "object",
"properties": {
"draft": {
"type": "string",
"description": "The draft to accept.",
},
},
},
},
],
)
model = FakeChatOpenAI()
chain = model.bind(functions=router.functions) | router
assert router.functions == snapshot
assert chain.invoke("Something about turtles?") == "Accepted draft: turtles!"
revise.assert_not_called()
accept.assert_called_once_with({"draft": "turtles"})
|