input (stringlengths 33-5k) | output (stringlengths 32-5k)
---|---
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import TextDoc
def test_simple_init():
t = TextDoc(text='hello')
assert t.text == 'hello'
def test_str_init():
t = parse_obj_as(TextDoc, 'hello')
assert t.text == 'hello'
def test_doc():
class MyDoc(BaseDocument):
text1: TextDoc
text2: TextDoc
doc = MyDoc(text1='hello', text2=TextDoc(text='world'))
assert doc.text1.text == 'hello'
assert doc.text2.text == 'world'
|
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Text
def test_simple_init():
t = Text(text='hello')
assert t.text == 'hello'
def test_str_init():
t = parse_obj_as(Text, 'hello')
assert t.text == 'hello'
def test_doc():
class MyDoc(BaseDocument):
text1: Text
text2: Text
doc = MyDoc(text1='hello', text2=Text(text='world'))
assert doc.text1.text == 'hello'
assert doc.text2.text == 'world'
|
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
actual = fn(x, y, mode)
expected = torch.stack(
[
torch.stack(
[fn(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0), mode).squeeze(0) for j in range(leading_dims[1])]
)
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
expected = []
for i in range(leading_dims[0]):
for j in range(leading_dims[1]):
for k in range(leading_dims[2]):
expected.append(F.add_noise(waveform[i][j][k], noise[i][j][k], lengths[i][j][k], snr[i][j][k]))
self.assertEqual(torch.stack(expected), actual.reshape(-1, L))
|
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TorchaudioTestCase
class BatchConsistencyTest(TorchaudioTestCase):
@nested_params(
[F.convolve, F.fftconvolve],
)
def test_convolve(self, fn):
leading_dims = (2, 3)
L_x, L_y = 89, 43
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
actual = fn(x, y)
expected = torch.stack(
[
torch.stack([fn(x[i, j].unsqueeze(0), y[i, j].unsqueeze(0)).squeeze(0) for j in range(leading_dims[1])])
for i in range(leading_dims[0])
]
)
self.assertEqual(expected, actual)
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
expected = []
for i in range(leading_dims[0]):
for j in range(leading_dims[1]):
for k in range(leading_dims[2]):
expected.append(F.add_noise(waveform[i][j][k], noise[i][j][k], lengths[i][j][k], snr[i][j][k]))
self.assertEqual(torch.stack(expected), actual.reshape(-1, L))
|
from __future__ import annotations
import os
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("sentence-transformers/stsb")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from __future__ import annotations
import os
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
from tests.utils import SafeTemporaryDirectory
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def stsb_bert_tiny_model_onnx() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-onnx")
@pytest.fixture()
def stsb_bert_tiny_model_openvino() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-openvino")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> DatasetDict:
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
"""
if os.environ.get("CI", None):
# Note: `ignore_cleanup_errors=True` is used to avoid NotADirectoryError in Windows on GitHub Actions.
# See https://github.com/python/cpython/issues/107408, https://www.scivision.dev/python-tempfile-permission-error-windows/
with SafeTemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
import torch
__all__ = ["DeepSpeech"]
class FullyConnected(torch.nn.Module):
"""
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
"""
def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None:
super(FullyConnected, self).__init__()
self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True)
self.relu_max_clip = relu_max_clip
self.dropout = dropout
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.fc(x)
x = torch.nn.functional.relu(x)
x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip)
if self.dropout:
x = torch.nn.functional.dropout(x, self.dropout, self.training)
return x
class DeepSpeech(torch.nn.Module):
"""DeepSpeech architecture introduced in
*Deep Speech: Scaling up end-to-end speech recognition* :cite:`hannun2014deep`.
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
n_class: Number of output classes
"""
def __init__(
self,
n_feature: int,
n_hidden: int = 2048,
n_class: int = 40,
dropout: float = 0.0,
) -> None:
super(DeepSpeech, self).__init__()
self.n_hidden = n_hidden
self.fc1 = FullyConnected(n_feature, n_hidden, dropout)
self.fc2 = FullyConnected(n_hidden, n_hidden, dropout)
self.fc3 = FullyConnected(n_hidden, n_hidden, dropout)
self.bi_rnn = torch.nn.RNN(n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True)
self.fc4 = FullyConnected(n_hidden, n_hidden, dropout)
self.out = torch.nn.Linear(n_hidden, n_class)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x (torch.Tensor): Tensor of dimension (batch, channel, time, feature).
Returns:
Tensor: Predictor tensor of dimension (batch, time, class).
"""
# N x C x T x F
x = self.fc1(x)
# N x C x T x H
x = self.fc2(x)
# N x C x T x H
x = self.fc3(x)
# N x C x T x H
x = x.squeeze(1)
# N x T x H
x = x.transpose(0, 1)
# T x N x H
x, _ = self.bi_rnn(x)
# The fifth (non-recurrent) layer takes both the forward and backward units as inputs
x = x[:, :, : self.n_hidden] + x[:, :, self.n_hidden :]
# T x N x H
x = self.fc4(x)
# T x N x H
x = self.out(x)
# T x N x n_class
x = x.permute(1, 0, 2)
# N x T x n_class
x = torch.nn.functional.log_softmax(x, dim=2)
# N x T x n_class
return x
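# --- Added usage sketch; not part of the original torchaudio file. ---
# Checks the shapes documented in forward(): input (batch, channel, time, feature)
# maps to output (batch, time, class). The sizes below are arbitrary example values.
if __name__ == "__main__":
    model = DeepSpeech(n_feature=40, n_hidden=256, n_class=29)
    dummy = torch.rand(8, 1, 100, 40)  # N x C x T x F
    out = model(dummy)
    print(out.shape)  # expected: torch.Size([8, 100, 29])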
|
import torch
__all__ = ["DeepSpeech"]
class FullyConnected(torch.nn.Module):
"""
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
"""
def __init__(self, n_feature: int, n_hidden: int, dropout: float, relu_max_clip: int = 20) -> None:
super(FullyConnected, self).__init__()
self.fc = torch.nn.Linear(n_feature, n_hidden, bias=True)
self.relu_max_clip = relu_max_clip
self.dropout = dropout
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.fc(x)
x = torch.nn.functional.relu(x)
x = torch.nn.functional.hardtanh(x, 0, self.relu_max_clip)
if self.dropout:
x = torch.nn.functional.dropout(x, self.dropout, self.training)
return x
class DeepSpeech(torch.nn.Module):
"""
DeepSpeech model architecture from *Deep Speech: Scaling up end-to-end speech recognition*
:cite:`hannun2014deep`.
Args:
n_feature: Number of input features
n_hidden: Internal hidden unit size.
n_class: Number of output classes
"""
def __init__(
self,
n_feature: int,
n_hidden: int = 2048,
n_class: int = 40,
dropout: float = 0.0,
) -> None:
super(DeepSpeech, self).__init__()
self.n_hidden = n_hidden
self.fc1 = FullyConnected(n_feature, n_hidden, dropout)
self.fc2 = FullyConnected(n_hidden, n_hidden, dropout)
self.fc3 = FullyConnected(n_hidden, n_hidden, dropout)
self.bi_rnn = torch.nn.RNN(n_hidden, n_hidden, num_layers=1, nonlinearity="relu", bidirectional=True)
self.fc4 = FullyConnected(n_hidden, n_hidden, dropout)
self.out = torch.nn.Linear(n_hidden, n_class)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x (torch.Tensor): Tensor of dimension (batch, channel, time, feature).
Returns:
Tensor: Predictor tensor of dimension (batch, time, class).
"""
# N x C x T x F
x = self.fc1(x)
# N x C x T x H
x = self.fc2(x)
# N x C x T x H
x = self.fc3(x)
# N x C x T x H
x = x.squeeze(1)
# N x T x H
x = x.transpose(0, 1)
# T x N x H
x, _ = self.bi_rnn(x)
# The fifth (non-recurrent) layer takes both the forward and backward units as inputs
x = x[:, :, : self.n_hidden] + x[:, :, self.n_hidden :]
# T x N x H
x = self.fc4(x)
# T x N x H
x = self.out(x)
# T x N x n_class
x = x.permute(1, 0, 2)
# N x T x n_class
x = torch.nn.functional.log_softmax(x, dim=2)
# N x T x n_class
return x
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data, which is helpful in case you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
import logging
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets import load_dataset
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data, which is helpful in case you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#Important: you need to guard your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == '__main__':
#Set params
data_stream_size = 16384 #Size of the data that is loaded into memory at once
chunk_size = 1024 #Size of the chunks that are sent to each process
encode_batch_size = 128 #Batch size of the model
#Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset('yahoo_answers_topics', split='train', streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
#Define the model
model = SentenceTransformer('all-MiniLM-L6-v2')
#Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
#Compute the embeddings using the multi-process pool
sentences = batch['best_answer']
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
#Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""Utils for jupyter notebook."""
import os
from io import BytesIO
from typing import Any, Dict, List, Tuple
import matplotlib.pyplot as plt
import requests
from IPython.display import Markdown, display
from llama_index.core.base.response.schema import Response
from llama_index.core.img_utils import b64_2_img
from llama_index.core.schema import ImageNode, MetadataMode, NodeWithScore
from llama_index.core.utils import truncate_text
from PIL import Image
DEFAULT_THUMBNAIL_SIZE = (512, 512)
DEFAULT_IMAGE_MATRIX = (3, 3)
DEFAULT_SHOW_TOP_K = 3
def display_image(img_str: str, size: Tuple[int, int] = DEFAULT_THUMBNAIL_SIZE) -> None:
"""Display base64 encoded image str as image for jupyter notebook."""
img = b64_2_img(img_str)
img.thumbnail(size)
display(img)
def display_image_uris(
image_paths: List[str],
image_matrix: Tuple[int, int] = DEFAULT_IMAGE_MATRIX,
top_k: int = DEFAULT_SHOW_TOP_K,
) -> None:
"""Display base64 encoded image str as image for jupyter notebook."""
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths[:top_k]:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(image_matrix[0], image_matrix[1], images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= image_matrix[0] * image_matrix[1]:
break
def display_source_node(
source_node: NodeWithScore,
source_length: int = 100,
show_source_metadata: bool = False,
metadata_mode: MetadataMode = MetadataMode.NONE,
) -> None:
"""Display source node for jupyter notebook."""
source_text_fmt = truncate_text(
source_node.node.get_content(metadata_mode=metadata_mode).strip(), source_length
)
text_md = (
f"**Node ID:** {source_node.node.node_id}<br>"
f"**Similarity:** {source_node.score}<br>"
f"**Text:** {source_text_fmt}<br>"
)
if show_source_metadata:
text_md += f"**Metadata:** {source_node.node.metadata}<br>"
if isinstance(source_node.node, ImageNode):
text_md += "**Image:**"
display(Markdown(text_md))
if isinstance(source_node.node, ImageNode) and source_node.node.image is not None:
display_image(source_node.node.image)
def display_metadata(metadata: Dict[str, Any]) -> None:
"""Display metadata for jupyter notebook."""
display(metadata)
def display_response(
response: Response,
source_length: int = 100,
show_source: bool = False,
show_metadata: bool = False,
show_source_metadata: bool = False,
) -> None:
"""Display response for jupyter notebook."""
if response.response is None:
response_text = "None"
else:
response_text = response.response.strip()
display(Markdown(f"**`Final Response:`** {response_text}"))
if show_source:
for ind, source_node in enumerate(response.source_nodes):
display(Markdown("---"))
display(
Markdown(f"**`Source Node {ind + 1}/{len(response.source_nodes)}`**")
)
display_source_node(
source_node,
source_length=source_length,
show_source_metadata=show_source_metadata,
)
if show_metadata:
if response.metadata is not None:
display_metadata(response.metadata)
def display_query_and_multimodal_response(
query_str: str, response: Response, plot_height: int = 2, plot_width: int = 5
) -> None:
"""For displaying a query and its multi-modal response."""
if response.metadata:
image_nodes = response.metadata["image_nodes"] or []
else:
image_nodes = []
num_subplots = len(image_nodes)
f, axarr = plt.subplots(1, num_subplots)
f.set_figheight(plot_height)
f.set_figwidth(plot_width)
ix = 0
for ix, scored_img_node in enumerate(image_nodes):
img_node = scored_img_node.node
image = None
if img_node.image_url:
img_response = requests.get(img_node.image_url)
image = Image.open(BytesIO(img_response.content)).convert("RGB")
elif img_node.image_path:
image = Image.open(img_node.image_path).convert("RGB")
else:
raise ValueError(
"A retrieved image must have image_path or image_url specified."
)
if num_subplots > 1:
axarr[ix].imshow(image)
axarr[ix].set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9)
else:
axarr.imshow(image)
axarr.set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9)
f.tight_layout()
print(f"Query: {query_str}\n=======")
print(f"Retrieved Images:\n")
plt.show()
print("=======")
print(f"Response: {response.response}\n=======\n")
|
"""Utils for jupyter notebook."""
import os
from io import BytesIO
from typing import Any, Dict, List, Tuple
import matplotlib.pyplot as plt
import requests
from IPython.display import Markdown, display
from llama_index.core.base.response.schema import Response
from llama_index.core.img_utils import b64_2_img
from llama_index.core.schema import ImageNode, MetadataMode, NodeWithScore
from llama_index.core.utils import truncate_text
from PIL import Image
DEFAULT_THUMBNAIL_SIZE = (512, 512)
DEFAULT_IMAGE_MATRIX = (3, 3)
DEFAULT_SHOW_TOP_K = 3
def display_image(img_str: str, size: Tuple[int, int] = DEFAULT_THUMBNAIL_SIZE) -> None:
"""Display base64 encoded image str as image for jupyter notebook."""
img = b64_2_img(img_str)
img.thumbnail(size)
display(img)
def display_image_uris(
image_paths: List[str],
image_matrix: Tuple[int, int] = DEFAULT_IMAGE_MATRIX,
top_k: int = DEFAULT_SHOW_TOP_K,
) -> None:
"""Display base64 encoded image str as image for jupyter notebook."""
images_shown = 0
plt.figure(figsize=(16, 9))
for img_path in image_paths[:top_k]:
if os.path.isfile(img_path):
image = Image.open(img_path)
plt.subplot(image_matrix[0], image_matrix[1], images_shown + 1)
plt.imshow(image)
plt.xticks([])
plt.yticks([])
images_shown += 1
if images_shown >= image_matrix[0] * image_matrix[1]:
break
def display_source_node(
source_node: NodeWithScore,
source_length: int = 100,
show_source_metadata: bool = False,
metadata_mode: MetadataMode = MetadataMode.NONE,
) -> None:
"""Display source node for jupyter notebook."""
source_text_fmt = truncate_text(
source_node.node.get_content(metadata_mode=metadata_mode).strip(), source_length
)
text_md = (
f"**Node ID:** {source_node.node.node_id}<br>"
f"**Similarity:** {source_node.score}<br>"
f"**Text:** {source_text_fmt}<br>"
)
if show_source_metadata:
text_md += f"**Metadata:** {source_node.node.metadata}<br>"
if isinstance(source_node.node, ImageNode):
text_md += "**Image:**"
display(Markdown(text_md))
if isinstance(source_node.node, ImageNode) and source_node.node.image is not None:
display_image(source_node.node.image)
def display_metadata(metadata: Dict[str, Any]) -> None:
"""Display metadata for jupyter notebook."""
display(metadata)
def display_response(
response: Response,
source_length: int = 100,
show_source: bool = False,
show_metadata: bool = False,
show_source_metadata: bool = False,
) -> None:
"""Display response for jupyter notebook."""
if response.response is None:
response_text = "None"
else:
response_text = response.response.strip()
display(Markdown(f"**`Final Response:`** {response_text}"))
if show_source:
for ind, source_node in enumerate(response.source_nodes):
display(Markdown("---"))
display(
Markdown(f"**`Source Node {ind + 1}/{len(response.source_nodes)}`**")
)
display_source_node(
source_node,
source_length=source_length,
show_source_metadata=show_source_metadata,
)
if show_metadata:
if response.metadata is not None:
display_metadata(response.metadata)
def display_query_and_multimodal_response(
query_str: str, response: Response, plot_height: int = 2, plot_width: int = 5
) -> None:
"""For displaying a query and its multi-modal response."""
if response.metadata:
image_nodes = response.metadata["image_nodes"] or []
else:
image_nodes = []
num_subplots = len(image_nodes)
f, axarr = plt.subplots(1, num_subplots)
f.set_figheight(plot_height)
f.set_figwidth(plot_width)
ix = 0
for ix, scored_img_node in enumerate(image_nodes):
img_node = scored_img_node.node
image = None
if img_node.image_url:
img_response = requests.get(img_node.image_url)
image = Image.open(BytesIO(img_response.content)).convert("RGB")
elif img_node.image_path:
image = Image.open(img_node.image_path).convert("RGB")
else:
raise ValueError(
"A retrieved image must have image_path or image_url specified."
)
if num_subplots > 1:
axarr[ix].imshow(image)
axarr[ix].set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9)
else:
axarr.imshow(image)
axarr.set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9)
f.tight_layout()
print(f"Query: {query_str}\n=======")
print(f"Retrieved Images:\n")
plt.show()
print("=======")
print(f"Response: {response.response}\n=======\n")
|
# Copyright (c) OpenMMLab. All rights reserved.
from .local_visualizer import DetLocalVisualizer
from .palette import get_palette, jitter_color, palette_val
__all__ = ['palette_val', 'get_palette', 'DetLocalVisualizer', 'jitter_color']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .local_visualizer import DetLocalVisualizer
from .palette import get_palette, palette_val
__all__ = ['palette_val', 'get_palette', 'DetLocalVisualizer']
|
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.updater import get_basescore
rng = np.random.RandomState(1994)
class TestEarlyStopping:
@pytest.mark.skipif(**tm.no_sklearn())
def test_early_stopping_nonparallel(self):
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
digits = load_digits(n_class=2)
X = digits['data']
y = digits['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf1 = xgb.XGBClassifier(learning_rate=0.1)
clf1.fit(X_train, y_train, early_stopping_rounds=5, eval_metric="auc",
eval_set=[(X_test, y_test)])
clf2 = xgb.XGBClassifier(learning_rate=0.1)
clf2.fit(X_train, y_train, early_stopping_rounds=4, eval_metric="auc",
eval_set=[(X_test, y_test)])
# should be the same
assert clf1.best_score == clf2.best_score
assert clf1.best_score != 1
# check overfit
clf3 = xgb.XGBClassifier(
learning_rate=0.1,
eval_metric="auc",
early_stopping_rounds=10
)
clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)])
base_score = get_basescore(clf3)
assert 0.53 > base_score > 0.5
clf3 = xgb.XGBClassifier(
learning_rate=0.1,
base_score=.5,
eval_metric="auc",
early_stopping_rounds=10
)
clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)])
assert clf3.best_score == 1
def evalerror(self, preds, dtrain):
from sklearn.metrics import mean_squared_error
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
return 'rmse', mean_squared_error(labels, preds)
@staticmethod
def assert_metrics_length(cv, expected_length):
for key, value in cv.items():
assert len(value) == expected_length
@pytest.mark.skipif(**tm.no_sklearn())
def test_cv_early_stopping(self):
from sklearn.datasets import load_digits
digits = load_digits(n_class=2)
X = digits['data']
y = digits['target']
dm = xgb.DMatrix(X, label=y)
params = {
'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic',
'eval_metric': 'error'
}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=5)
self.assert_metrics_length(cv, 3)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, early_stopping_rounds=1)
self.assert_metrics_length(cv, 5)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, maximize=True,
early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
@pytest.mark.skipif(**tm.no_sklearn())
@pytest.mark.skipif(**tm.no_pandas())
def test_cv_early_stopping_with_multiple_eval_sets_and_metrics(self):
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
dm = xgb.DMatrix(X, label=y)
params = {'objective':'binary:logistic'}
metrics = [['auc'], ['error'], ['logloss'],
['logloss', 'auc'], ['logloss', 'error'], ['error', 'logloss']]
num_iteration_history = []
# If more than one metric is given, early stopping should use the last metric
for i, m in enumerate(metrics):
result = xgb.cv(params, dm, num_boost_round=1000, nfold=5, stratified=True,
metrics=m, early_stopping_rounds=20, seed=42)
num_iteration_history.append(len(result))
df = result['test-{}-mean'.format(m[-1])]
# When early stopping is invoked, the last metric should be as good as it can be.
if m[-1] == 'auc':
assert np.all(df <= df.iloc[-1])
else:
assert np.all(df >= df.iloc[-1])
assert num_iteration_history[:3] == num_iteration_history[3:]
|
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.updater import get_basescore
rng = np.random.RandomState(1994)
class TestEarlyStopping:
@pytest.mark.skipif(**tm.no_sklearn())
def test_early_stopping_nonparallel(self):
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
digits = load_digits(n_class=2)
X = digits['data']
y = digits['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf1 = xgb.XGBClassifier(learning_rate=0.1)
clf1.fit(X_train, y_train, early_stopping_rounds=5, eval_metric="auc",
eval_set=[(X_test, y_test)])
clf2 = xgb.XGBClassifier(learning_rate=0.1)
clf2.fit(X_train, y_train, early_stopping_rounds=4, eval_metric="auc",
eval_set=[(X_test, y_test)])
# should be the same
assert clf1.best_score == clf2.best_score
assert clf1.best_score != 1
# check overfit
clf3 = xgb.XGBClassifier(
learning_rate=0.1,
eval_metric="auc",
early_stopping_rounds=10
)
clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)])
base_score = get_basescore(clf3)
assert 0.53 > base_score > 0.5
clf3 = xgb.XGBClassifier(
learning_rate=0.1,
base_score=.5,
eval_metric="auc",
early_stopping_rounds=10
)
clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)])
assert clf3.best_score == 1
def evalerror(self, preds, dtrain):
from sklearn.metrics import mean_squared_error
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
return 'rmse', mean_squared_error(labels, preds)
@staticmethod
def assert_metrics_length(cv, expected_length):
for key, value in cv.items():
assert len(value) == expected_length
@pytest.mark.skipif(**tm.no_sklearn())
def test_cv_early_stopping(self):
from sklearn.datasets import load_digits
digits = load_digits(n_class=2)
X = digits['data']
y = digits['target']
dm = xgb.DMatrix(X, label=y)
params = {'max_depth': 2, 'eta': 1, 'verbosity': 0,
'objective': 'binary:logistic', 'eval_metric': 'error'}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=5)
self.assert_metrics_length(cv, 3)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, early_stopping_rounds=1)
self.assert_metrics_length(cv, 5)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, maximize=True,
early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
@pytest.mark.skipif(**tm.no_sklearn())
@pytest.mark.skipif(**tm.no_pandas())
def test_cv_early_stopping_with_multiple_eval_sets_and_metrics(self):
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
dm = xgb.DMatrix(X, label=y)
params = {'objective':'binary:logistic'}
metrics = [['auc'], ['error'], ['logloss'],
['logloss', 'auc'], ['logloss', 'error'], ['error', 'logloss']]
num_iteration_history = []
# If more than one metric is given, early stopping should use the last metric
for i, m in enumerate(metrics):
result = xgb.cv(params, dm, num_boost_round=1000, nfold=5, stratified=True,
metrics=m, early_stopping_rounds=20, seed=42)
num_iteration_history.append(len(result))
df = result['test-{}-mean'.format(m[-1])]
# When early stopping is invoked, the last metric should be as good as it can be.
if m[-1] == 'auc':
assert np.all(df <= df.iloc[-1])
else:
assert np.all(df >= df.iloc[-1])
assert num_iteration_history[:3] == num_iteration_history[3:]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.parrots_wrapper import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import detect_anomalous_params, merge_dict, stack_batch
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.parrots_wrapper import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (ExponentialMovingAverage, MomentumAnnealingEMA,
StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import detect_anomalous_params, merge_dict, stack_batch
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'StochasticWeightAverage',
'ExponentialMovingAverage', 'MomentumAnnealingEMA', 'BaseModel',
'BaseDataPreprocessor', 'ImgDataPreprocessor',
'MMSeparateDistributedDataParallel', 'BaseModule', 'stack_batch',
'merge_dict', 'detect_anomalous_params', 'ModuleList', 'ModuleDict',
'Sequential'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
_base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JsonGetValueTool, JsonListKeysTool
from langchain_community.tools.json.tool import JsonSpec
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"JsonSpec": "langchain_community.tools.json.tool",
"JsonListKeysTool": "langchain_community.tools",
"JsonGetValueTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"JsonGetValueTool",
"JsonListKeysTool",
"JsonSpec",
]
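# --- Added illustrative sketch; not part of the original langchain file. ---
# Accessing a deprecated name goes through __getattr__ above, which resolves it via
# DEPRECATED_LOOKUP and re-exports the object from langchain_community (this sketch
# assumes langchain-community is installed).
if __name__ == "__main__":
    import importlib
    module = importlib.import_module(DEPRECATED_LOOKUP["JsonSpec"])
    print(getattr(module, "JsonSpec"))  # the same object __getattr__("JsonSpec") returns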
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JsonGetValueTool, JsonListKeysTool
from langchain_community.tools.json.tool import JsonSpec
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"JsonSpec": "langchain_community.tools.json.tool",
"JsonListKeysTool": "langchain_community.tools",
"JsonGetValueTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"JsonSpec",
"JsonListKeysTool",
"JsonGetValueTool",
]
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class LayerNorm(Module):
config_keys: list[str] = ["dimension"]
def __init__(self, dimension: int):
super().__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super().__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
__version__ = '0.12.9'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.12.8'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields import ModelField
MESH_FILE_FORMATS = ('obj', 'glb', 'ply')
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh or point cloud information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_mesh_extension = any(url.endswith(ext) for ext in MESH_FILE_FORMATS)
if not has_mesh_extension:
raise ValueError(
f'{cls.__name__} must have one of the following extensions:'
f'{MESH_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def _load_trimesh_instance(
self: T,
force: Optional[str] = None,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
For 'scene' try to coerce everything into a scene.
:param skip_materials: Skip loading of materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
if not trimesh_args:
trimesh_args = {}
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
return mesh
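# --- Added illustrative sketch; not part of the original docarray file. ---
# The loader choice inside _load_trimesh_instance: remote http(s) URLs go through
# trimesh.load_remote, local paths through trimesh.load. The URL below is a
# hypothetical example value.
if __name__ == "__main__":
    import urllib.parse
    import trimesh
    url = 'https://example.com/model.glb'
    scheme = urllib.parse.urlparse(url).scheme
    loader = trimesh.load_remote if scheme in ('http', 'https') else trimesh.load
    print(loader.__name__)  # 'load_remote' for this URL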
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields import ModelField
MESH_FILE_FORMATS = ('obj', 'glb', 'ply')
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh or point cloud information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_mesh_extension = any(url.endswith(ext) for ext in MESH_FILE_FORMATS)
if not has_mesh_extension:
raise ValueError(
f'{cls.__name__} must have one of the following extensions:'
f'{MESH_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def _load_trimesh_instance(
self: T,
force: Optional[str] = None,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
For 'scene' try to coerce everything into a scene.
:param skip_materials: Skip loading of materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
import trimesh
if not trimesh_args:
trimesh_args = {}
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
return mesh
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
MultiDataAspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_data_sampler import MultiDataSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler',
'TrackAspectRatioBatchSampler', 'MultiDataSampler',
'MultiDataAspectRatioBatchSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler',
'TrackAspectRatioBatchSampler'
]
|
import asyncio
from datetime import datetime, timedelta, timezone
from typing import Any
import feedparser
import pydantic
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class RSSEntry(pydantic.BaseModel):
title: str
link: str
description: str
pub_date: datetime
author: str
categories: list[str]
class ReadRSSFeedBlock(Block):
class Input(BlockSchema):
rss_url: str = SchemaField(
description="The URL of the RSS feed to read",
placeholder="https://example.com/rss",
)
time_period: int = SchemaField(
description="The time period to check in minutes relative to the run block runtime, e.g. 60 would check for new entries in the last hour.",
placeholder="1440",
default=1440,
)
polling_rate: int = SchemaField(
description="The number of seconds to wait between polling attempts.",
placeholder="300",
)
run_continuously: bool = SchemaField(
description="Whether to run the block continuously or just once.",
default=True,
)
class Output(BlockSchema):
entry: RSSEntry = SchemaField(description="The RSS item")
def __init__(self):
super().__init__(
id="5ebe6768-8e5d-41e3-9134-1c7bd89a8d52",
input_schema=ReadRSSFeedBlock.Input,
output_schema=ReadRSSFeedBlock.Output,
description="Reads RSS feed entries from a given URL.",
categories={BlockCategory.INPUT},
test_input={
"rss_url": "https://example.com/rss",
"time_period": 10_000_000,
"polling_rate": 1,
"run_continuously": False,
},
test_output=[
(
"entry",
RSSEntry(
title="Example RSS Item",
link="https://example.com/article",
description="This is an example RSS item description.",
pub_date=datetime(2023, 6, 23, 12, 30, 0, tzinfo=timezone.utc),
author="John Doe",
categories=["Technology", "News"],
),
),
],
test_mock={
"parse_feed": lambda *args, **kwargs: {
"entries": [
{
"title": "Example RSS Item",
"link": "https://example.com/article",
"summary": "This is an example RSS item description.",
"published_parsed": (2023, 6, 23, 12, 30, 0, 4, 174, 0),
"author": "John Doe",
"tags": [{"term": "Technology"}, {"term": "News"}],
}
]
}
},
)
@staticmethod
def parse_feed(url: str) -> dict[str, Any]:
return feedparser.parse(url) # type: ignore
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
keep_going = True
start_time = datetime.now(timezone.utc) - timedelta(
minutes=input_data.time_period
)
while keep_going:
keep_going = input_data.run_continuously
feed = self.parse_feed(input_data.rss_url)
for entry in feed["entries"]:
pub_date = datetime(*entry["published_parsed"][:6], tzinfo=timezone.utc)
if pub_date > start_time:
yield (
"entry",
RSSEntry(
title=entry["title"],
link=entry["link"],
description=entry.get("summary", ""),
pub_date=pub_date,
author=entry.get("author", ""),
categories=[tag["term"] for tag in entry.get("tags", [])],
),
)
await asyncio.sleep(input_data.polling_rate)
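# --- Added illustrative sketch; not part of the original block. ---
# How an RSS entry's 'published_parsed' struct_time tuple from feedparser is turned
# into the timezone-aware datetime that run() compares against start_time.
if __name__ == "__main__":
    published_parsed = (2023, 6, 23, 12, 30, 0, 4, 174, 0)  # example value
    pub_date = datetime(*published_parsed[:6], tzinfo=timezone.utc)
    print(pub_date.isoformat())  # 2023-06-23T12:30:00+00:00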
|
import time
from datetime import datetime, timedelta, timezone
from typing import Any
import feedparser
import pydantic
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class RSSEntry(pydantic.BaseModel):
title: str
link: str
description: str
pub_date: datetime
author: str
categories: list[str]
class ReadRSSFeedBlock(Block):
class Input(BlockSchema):
rss_url: str = SchemaField(
description="The URL of the RSS feed to read",
placeholder="https://example.com/rss",
)
time_period: int = SchemaField(
description="The time period to check in minutes relative to the run block runtime, e.g. 60 would check for new entries in the last hour.",
placeholder="1440",
default=1440,
)
polling_rate: int = SchemaField(
description="The number of seconds to wait between polling attempts.",
placeholder="300",
)
run_continuously: bool = SchemaField(
description="Whether to run the block continuously or just once.",
default=True,
)
class Output(BlockSchema):
entry: RSSEntry = SchemaField(description="The RSS item")
def __init__(self):
super().__init__(
id="5ebe6768-8e5d-41e3-9134-1c7bd89a8d52",
input_schema=ReadRSSFeedBlock.Input,
output_schema=ReadRSSFeedBlock.Output,
description="Reads RSS feed entries from a given URL.",
categories={BlockCategory.INPUT},
test_input={
"rss_url": "https://example.com/rss",
"time_period": 10_000_000,
"polling_rate": 1,
"run_continuously": False,
},
test_output=[
(
"entry",
RSSEntry(
title="Example RSS Item",
link="https://example.com/article",
description="This is an example RSS item description.",
pub_date=datetime(2023, 6, 23, 12, 30, 0, tzinfo=timezone.utc),
author="John Doe",
categories=["Technology", "News"],
),
),
],
test_mock={
"parse_feed": lambda *args, **kwargs: {
"entries": [
{
"title": "Example RSS Item",
"link": "https://example.com/article",
"summary": "This is an example RSS item description.",
"published_parsed": (2023, 6, 23, 12, 30, 0, 4, 174, 0),
"author": "John Doe",
"tags": [{"term": "Technology"}, {"term": "News"}],
}
]
}
},
)
@staticmethod
def parse_feed(url: str) -> dict[str, Any]:
return feedparser.parse(url) # type: ignore
def run(self, input_data: Input, **kwargs) -> BlockOutput:
keep_going = True
start_time = datetime.now(timezone.utc) - timedelta(
minutes=input_data.time_period
)
while keep_going:
keep_going = input_data.run_continuously
feed = self.parse_feed(input_data.rss_url)
for entry in feed["entries"]:
pub_date = datetime(*entry["published_parsed"][:6], tzinfo=timezone.utc)
if pub_date > start_time:
yield (
"entry",
RSSEntry(
title=entry["title"],
link=entry["link"],
description=entry.get("summary", ""),
pub_date=pub_date,
author=entry.get("author", ""),
categories=[tag["term"] for tag in entry.get("tags", [])],
),
)
time.sleep(input_data.polling_rate)
|
"""Utilities for image processing."""
from typing import Any
def __getattr__(name: str) -> Any:
if name in {"encode_image", "image_to_data_url"}:
msg = (
f"'{name}' has been removed for security reasons.\n\n"
f"Usage of this utility in environments with user-input paths is a "
f"security vulnerability. Out of an abundance of caution, the utility "
f"has been removed to prevent possible misuse."
)
raise ValueError(msg)
raise AttributeError(name)
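# --- Added illustrative sketch; not part of the original module. ---
# Any attribute access for the removed names is routed through __getattr__ above
# and raises the ValueError explained in the message.
if __name__ == "__main__":
    import sys
    try:
        getattr(sys.modules[__name__], "encode_image")
    except ValueError as err:
        print(err)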
|
"""Utilities for image processing."""
from typing import Any
def __getattr__(name: str) -> Any:
if name in ("encode_image", "image_to_data_url"):
msg = (
f"'{name}' has been removed for security reasons.\n\n"
f"Usage of this utility in environments with user-input paths is a "
f"security vulnerability. Out of an abundance of caution, the utility "
f"has been removed to prevent possible misuse."
)
raise ValueError(msg)
raise AttributeError(name)
|
"""Run smoke tests"""
import os
from pathlib import Path
import torch
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision useable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_resnet50_classify() -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg"))
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name}: {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
import torchvision
from torchvision.io import read_image
image_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "assets", "encode_jpeg", "grace_hopper_517x606.jpg"
)
print("torchvision version is ", torchvision.__version__)
img = read_image(image_path)
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainingArguments
from sentence_transformers.cross_encoder.evaluation import CrossEncoderClassificationEvaluator
from sentence_transformers.cross_encoder.losses import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the Quora duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
logging.info("Read quora-duplicates train dataset")
dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train")
eval_dataset = dataset.select(range(10_000))
test_dataset = dataset.select(range(10_000, 20_000))
train_dataset = dataset.select(range(20_000, len(dataset)))
logging.info(train_dataset)
logging.info(eval_dataset)
logging.info(test_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CrossEncoderClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
labels=eval_dataset["label"],
name="quora-duplicates-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-quora-duplicates"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on the test dataset
test_cls_evaluator = CrossEncoderClassificationEvaluator(
    sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
    labels=test_dataset["label"],
name="quora-duplicates-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
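# 10. (Optional) quick usage sketch, not part of the original recipe: after training,
# the reranker can score sentence pairs directly via `model.predict`. The question
# pairs below are made-up placeholders.
#
# scores = model.predict(
#     [
#         ("How can I learn Python quickly?", "What is the fastest way to learn Python?"),
#         ("How can I learn Python quickly?", "What is the capital of France?"),
#     ]
# )
# print(scores)  # higher scores indicate more likely duplicates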
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainingArguments
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the Quora duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
logging.info("Read quora-duplicates train dataset")
dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train")
eval_dataset = dataset.select(range(10_000))
test_dataset = dataset.select(range(10_000, 20_000))
train_dataset = dataset.select(range(20_000, len(dataset)))
logging.info(train_dataset)
logging.info(eval_dataset)
logging.info(test_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
labels=eval_dataset["label"],
name="quora-duplicates-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-quora-duplicates"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on the test dataset
test_cls_evaluator = CEClassificationEvaluator(
    sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
    labels=test_dataset["label"],
name="quora-duplicates-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
A demo for multi-output regression
==================================
The demo is adapted from scikit-learn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_random_forest_regression_multioutput.html#sphx-glr-auto-examples-ensemble-plot-random-forest-regression-multioutput-py
See :doc:`/tutorials/multioutput` for more information.
.. note::
The feature is experimental. For the `multi_output_tree` strategy, many features are
missing.
"""
import argparse
from typing import Dict, List, Tuple
import numpy as np
from matplotlib import pyplot as plt
import xgboost as xgb
def plot_predt(y: np.ndarray, y_predt: np.ndarray, name: str) -> None:
s = 25
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, edgecolor="black", label="data")
plt.scatter(
y_predt[:, 0], y_predt[:, 1], c="cornflowerblue", s=s, edgecolor="black"
)
plt.xlim([-1, 2])
plt.ylim([-1, 2])
plt.show()
def gen_circle() -> Tuple[np.ndarray, np.ndarray]:
"Generate a sample dataset that y is a 2 dim circle."
rng = np.random.RandomState(1994)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += 0.5 - rng.rand(20, 2)
y = y - y.min()
y = y / y.max()
return X, y
def rmse_model(plot_result: bool, strategy: str) -> None:
"""Draw a circle with 2-dim coordinate as target variables."""
X, y = gen_circle()
# Train a regressor on it
reg = xgb.XGBRegressor(
tree_method="hist",
n_estimators=128,
n_jobs=16,
max_depth=8,
multi_strategy=strategy,
subsample=0.6,
)
reg.fit(X, y, eval_set=[(X, y)])
y_predt = reg.predict(X)
if plot_result:
plot_predt(y, y_predt, "multi")
def custom_rmse_model(plot_result: bool, strategy: str) -> None:
"""Train using Python implementation of Squared Error."""
def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
"""Compute the gradient squared error."""
y = dtrain.get_label().reshape(predt.shape)
return predt - y
def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
"""Compute the hessian for squared error."""
return np.ones(predt.shape)
def squared_log(
predt: np.ndarray, dtrain: xgb.DMatrix
) -> Tuple[np.ndarray, np.ndarray]:
grad = gradient(predt, dtrain)
hess = hessian(predt, dtrain)
        # both numpy.ndarray and cupy.ndarray work.
return grad, hess
def rmse(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:
y = dtrain.get_label().reshape(predt.shape)
v = np.sqrt(np.sum(np.power(y - predt, 2)))
return "PyRMSE", v
X, y = gen_circle()
Xy = xgb.DMatrix(X, y)
results: Dict[str, Dict[str, List[float]]] = {}
# Make sure the `num_target` is passed to XGBoost when custom objective is used.
# When builtin objective is used, XGBoost can figure out the number of targets
# automatically.
booster = xgb.train(
{
"tree_method": "hist",
"num_target": y.shape[1],
"multi_strategy": strategy,
},
dtrain=Xy,
num_boost_round=128,
obj=squared_log,
evals=[(Xy, "Train")],
evals_result=results,
custom_metric=rmse,
)
y_predt = booster.inplace_predict(X)
if plot_result:
plot_predt(y, y_predt, "multi")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", choices=[0, 1], type=int, default=1)
args = parser.parse_args()
# Train with builtin RMSE objective
# - One model per output.
rmse_model(args.plot == 1, "one_output_per_tree")
    # - One model for all outputs, this is still a work in progress, many features are
# missing.
rmse_model(args.plot == 1, "multi_output_tree")
# Train with custom objective.
# - One model per output.
custom_rmse_model(args.plot == 1, "one_output_per_tree")
    # - One model for all outputs, this is still a work in progress, many features are
# missing.
custom_rmse_model(args.plot == 1, "multi_output_tree")
|
"""
A demo for multi-output regression
==================================
The demo is adapted from scikit-learn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_random_forest_regression_multioutput.html#sphx-glr-auto-examples-ensemble-plot-random-forest-regression-multioutput-py
See :doc:`/tutorials/multioutput` for more information.
.. note::
The feature is experimental. For the `multi_output_tree` strategy, many features are
missing.
"""
import argparse
from typing import Dict, List, Tuple
import numpy as np
from matplotlib import pyplot as plt
import xgboost as xgb
def plot_predt(y: np.ndarray, y_predt: np.ndarray, name: str) -> None:
s = 25
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, edgecolor="black", label="data")
plt.scatter(
y_predt[:, 0], y_predt[:, 1], c="cornflowerblue", s=s, edgecolor="black"
)
plt.xlim([-1, 2])
plt.ylim([-1, 2])
plt.show()
def gen_circle() -> Tuple[np.ndarray, np.ndarray]:
"Generate a sample dataset that y is a 2 dim circle."
rng = np.random.RandomState(1994)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += 0.5 - rng.rand(20, 2)
y = y - y.min()
y = y / y.max()
return X, y
def rmse_model(plot_result: bool, strategy: str) -> None:
"""Draw a circle with 2-dim coordinate as target variables."""
X, y = gen_circle()
# Train a regressor on it
reg = xgb.XGBRegressor(
tree_method="hist",
n_estimators=128,
n_jobs=16,
max_depth=8,
multi_strategy=strategy,
subsample=0.6,
)
reg.fit(X, y, eval_set=[(X, y)])
y_predt = reg.predict(X)
if plot_result:
plot_predt(y, y_predt, "multi")
def custom_rmse_model(plot_result: bool, strategy: str) -> None:
"""Train using Python implementation of Squared Error."""
    # Due to the experimental support status, the custom objective doesn't support
    # matrices as gradient and hessian; this will be changed in a future release.
def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
"""Compute the gradient squared error."""
y = dtrain.get_label().reshape(predt.shape)
return (predt - y).reshape(y.size)
def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
"""Compute the hessian for squared error."""
return np.ones(predt.shape).reshape(predt.size)
def squared_log(
predt: np.ndarray, dtrain: xgb.DMatrix
) -> Tuple[np.ndarray, np.ndarray]:
grad = gradient(predt, dtrain)
hess = hessian(predt, dtrain)
return grad, hess
def rmse(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:
y = dtrain.get_label().reshape(predt.shape)
v = np.sqrt(np.sum(np.power(y - predt, 2)))
return "PyRMSE", v
X, y = gen_circle()
Xy = xgb.DMatrix(X, y)
results: Dict[str, Dict[str, List[float]]] = {}
# Make sure the `num_target` is passed to XGBoost when custom objective is used.
# When builtin objective is used, XGBoost can figure out the number of targets
# automatically.
booster = xgb.train(
{
"tree_method": "hist",
"num_target": y.shape[1],
"multi_strategy": strategy,
},
dtrain=Xy,
num_boost_round=128,
obj=squared_log,
evals=[(Xy, "Train")],
evals_result=results,
custom_metric=rmse,
)
y_predt = booster.inplace_predict(X)
if plot_result:
plot_predt(y, y_predt, "multi")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", choices=[0, 1], type=int, default=1)
args = parser.parse_args()
# Train with builtin RMSE objective
# - One model per output.
rmse_model(args.plot == 1, "one_output_per_tree")
    # - One model for all outputs, this is still a work in progress, many features are
# missing.
rmse_model(args.plot == 1, "multi_output_tree")
# Train with custom objective.
# - One model per output.
custom_rmse_model(args.plot == 1, "one_output_per_tree")
    # - One model for all outputs, this is still a work in progress, many features are
# missing.
custom_rmse_model(args.plot == 1, "multi_output_tree")
|
"""Shopify tool spec."""
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class ShopifyToolSpec(BaseToolSpec):
"""Shopify tool spec."""
spec_functions = ["run_graphql_query"]
def __init__(self, shop_url: str, api_version: str, admin_api_key: str):
# Currently only supports Admin API auth
# https://shopify.dev/docs/apps/auth/admin-app-access-tokens
from shopify import Session, ShopifyResource
session = Session(shop_url, api_version, admin_api_key)
ShopifyResource.activate_session(session)
def run_graphql_query(self, graphql_query: str):
"""
Run a GraphQL query against the Shopify Admin API.
Example graphql_query: {
products (first: 3) {
edges {
node {
id
title
handle
}
}
}
}
providing this query would return the id, title and handle of the first 3 products
"""
from shopify import GraphQL
return GraphQL().execute(graphql_query)
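# Hedged usage sketch (illustrative only; the shop URL, API version, and access
# token below are placeholders, not real credentials):
#
# tool_spec = ShopifyToolSpec(
#     shop_url="your-store.myshopify.com",
#     api_version="2024-01",
#     admin_api_key="shpat_xxx",
# )
# print(tool_spec.run_graphql_query(
#     "{ products(first: 3) { edges { node { id title handle } } } }"
# ))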
|
"""Shopify tool spec."""
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class ShopifyToolSpec(BaseToolSpec):
"""Shopify tool spec."""
spec_functions = ["run_graphql_query"]
def __init__(self, shop_url: str, api_version: str, admin_api_key: str):
# Currently only supports Admin API auth
# https://shopify.dev/docs/apps/auth/admin-app-access-tokens
from shopify import Session, ShopifyResource
session = Session(shop_url, api_version, admin_api_key)
ShopifyResource.activate_session(session)
def run_graphql_query(self, graphql_query: str):
"""
Run a GraphQL query against the Shopify Admin API.
Example graphql_query: {
products (first: 3) {
edges {
node {
id
title
handle
}
}
}
}
providing this query would return the id, title and handle of the first 3 products
"""
from shopify import GraphQL
return GraphQL().execute(graphql_query)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import gaussian_blur
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
from datetime import datetime
import pytest
from jina import Document, DocumentArray, Flow
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_invalid_input_raise(protocol):
f = Flow(protocol=protocol).add()
with pytest.raises(BaseException):
with f:
da = DocumentArray([Document(text='hello', tags={'date': datetime.now()})])
f.post(on='/', inputs=da) # process should stop here and raise an exception
|
from datetime import datetime
import pytest
from jina import Document, DocumentArray, Flow
class MyOwnException(Exception):
pass
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_invalid_input_raise(protocol):
f = Flow(protocol=protocol).add()
try:
with f:
da = DocumentArray([Document(text='hello', tags={'date': datetime.now()})])
try:
f.post(
on='/', inputs=da
) # process should stop here and raise an exception
except Exception:
raise MyOwnException
assert False
except MyOwnException:
pass
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.40%
Model Anchor Sparsity: Active Dimensions: 103.0, Sparsity Ratio: 0.9966
Model Positive Sparsity: Active Dimensions: 67.4, Sparsity Ratio: 0.9978
Model Negative Sparsity: Active Dimensions: 65.9, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8540
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Anchor Sparsity: Active Dimensions: 105.5, Sparsity Ratio: 0.9965
Model Positive Sparsity: Active Dimensions: 69.8, Sparsity Ratio: 0.9977
Model Negative Sparsity: Active Dimensions: 68.6, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
"""Tests for the minimum dependencies in README.rst and pyproject.toml"""
import os
import re
from collections import defaultdict
from pathlib import Path
import pytest
import sklearn
from sklearn._min_dependencies import dependent_packages
from sklearn.utils.fixes import parse_version
min_depencies_tag_to_packages_without_version = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
min_depencies_tag_to_packages_without_version[extra].append(package)
min_dependencies_tag_to_pyproject_section = {
"build": "build-system.requires",
"install": "project.dependencies",
}
for tag in min_depencies_tag_to_packages_without_version:
min_dependencies_tag_to_pyproject_section[tag] = (
f"project.optional-dependencies.{tag}"
)
def test_min_dependencies_readme():
# Test that the minimum dependencies in the README.rst file are
# consistent with the minimum dependencies defined at the file:
# sklearn/_min_dependencies.py
pattern = re.compile(
r"(\.\. \|)"
r"(([A-Za-z]+\-?)+)"
r"(MinVersion\| replace::)"
r"( [0-9]+\.[0-9]+(\.[0-9]+)?)"
)
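    # For illustration (hypothetical version number), the pattern matches
    # README.rst lines of the form:
    #   .. |NumPyMinVersion| replace:: 1.19.5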
readme_path = Path(sklearn.__file__).parent.parent
readme_file = readme_path / "README.rst"
if not os.path.exists(readme_file):
# Skip the test if the README.rst file is not available.
# For instance, when installing scikit-learn from wheels
pytest.skip("The README.rst file is not available.")
with readme_file.open("r") as f:
for line in f:
matched = pattern.match(line)
if not matched:
continue
package, version = matched.group(2), matched.group(5)
package = package.lower()
if package in dependent_packages:
version = parse_version(version)
min_version = parse_version(dependent_packages[package][0])
assert version == min_version, f"{package} has a mismatched version"
def check_pyproject_section(
pyproject_section, min_dependencies_tag, skip_version_check_for=None
):
# tomllib is available in Python 3.11
tomllib = pytest.importorskip("tomllib")
if skip_version_check_for is None:
skip_version_check_for = []
expected_packages = min_depencies_tag_to_packages_without_version[
min_dependencies_tag
]
root_directory = Path(sklearn.__file__).parent.parent
pyproject_toml_path = root_directory / "pyproject.toml"
if not pyproject_toml_path.exists():
# Skip the test if the pyproject.toml file is not available.
# For instance, when installing scikit-learn from wheels
pytest.skip("pyproject.toml is not available.")
with pyproject_toml_path.open("rb") as f:
pyproject_toml = tomllib.load(f)
pyproject_section_keys = pyproject_section.split(".")
info = pyproject_toml
for key in pyproject_section_keys:
info = info[key]
pyproject_build_min_versions = {}
for requirement in info:
if ">=" in requirement:
package, version = requirement.split(">=")
elif "==" in requirement:
package, version = requirement.split("==")
else:
raise NotImplementedError(
f"{requirement} not supported yet in this test. "
"Only >= and == are supported for version requirements"
)
pyproject_build_min_versions[package] = version
assert sorted(pyproject_build_min_versions) == sorted(expected_packages)
for package, version in pyproject_build_min_versions.items():
version = parse_version(version)
expected_min_version = parse_version(dependent_packages[package][0])
if package in skip_version_check_for:
continue
assert version == expected_min_version, f"{package} has a mismatched version"
@pytest.mark.parametrize(
"min_dependencies_tag, pyproject_section",
min_dependencies_tag_to_pyproject_section.items(),
)
def test_min_dependencies_pyproject_toml(pyproject_section, min_dependencies_tag):
"""Check versions in pyproject.toml is consistent with _min_dependencies."""
# NumPy is more complex because build-time (>=1.25) and run-time (>=1.19.5)
# requirement currently don't match
skip_version_check_for = ["numpy"] if min_dependencies_tag == "build" else None
check_pyproject_section(
pyproject_section,
min_dependencies_tag,
skip_version_check_for=skip_version_check_for,
)
|
"""Tests for the minimum dependencies in README.rst and pyproject.toml"""
import os
import re
from collections import defaultdict
from pathlib import Path
import pytest
import sklearn
from sklearn._min_dependencies import dependent_packages
from sklearn.utils.fixes import parse_version
min_depencies_tag_to_packages_without_version = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
min_depencies_tag_to_packages_without_version[extra].append(package)
min_dependencies_tag_to_pyproject_section = {
"build": "build-system.requires",
"install": "project.dependencies",
}
for tag in min_depencies_tag_to_packages_without_version:
min_dependencies_tag_to_pyproject_section[tag] = (
f"project.optional-dependencies.{tag}"
)
def test_min_dependencies_readme():
# Test that the minimum dependencies in the README.rst file are
# consistent with the minimum dependencies defined at the file:
# sklearn/_min_dependencies.py
pattern = re.compile(
r"(\.\. \|)"
+ r"(([A-Za-z]+\-?)+)"
+ r"(MinVersion\| replace::)"
+ r"( [0-9]+\.[0-9]+(\.[0-9]+)?)"
)
readme_path = Path(sklearn.__file__).parent.parent
readme_file = readme_path / "README.rst"
if not os.path.exists(readme_file):
# Skip the test if the README.rst file is not available.
# For instance, when installing scikit-learn from wheels
pytest.skip("The README.rst file is not available.")
with readme_file.open("r") as f:
for line in f:
matched = pattern.match(line)
if not matched:
continue
package, version = matched.group(2), matched.group(5)
package = package.lower()
if package in dependent_packages:
version = parse_version(version)
min_version = parse_version(dependent_packages[package][0])
assert version == min_version, f"{package} has a mismatched version"
def check_pyproject_section(
pyproject_section, min_dependencies_tag, skip_version_check_for=None
):
# tomllib is available in Python 3.11
tomllib = pytest.importorskip("tomllib")
if skip_version_check_for is None:
skip_version_check_for = []
expected_packages = min_depencies_tag_to_packages_without_version[
min_dependencies_tag
]
root_directory = Path(sklearn.__file__).parent.parent
pyproject_toml_path = root_directory / "pyproject.toml"
if not pyproject_toml_path.exists():
# Skip the test if the pyproject.toml file is not available.
# For instance, when installing scikit-learn from wheels
pytest.skip("pyproject.toml is not available.")
with pyproject_toml_path.open("rb") as f:
pyproject_toml = tomllib.load(f)
pyproject_section_keys = pyproject_section.split(".")
info = pyproject_toml
for key in pyproject_section_keys:
info = info[key]
pyproject_build_min_versions = {}
for requirement in info:
if ">=" in requirement:
package, version = requirement.split(">=")
elif "==" in requirement:
package, version = requirement.split("==")
else:
raise NotImplementedError(
f"{requirement} not supported yet in this test. "
"Only >= and == are supported for version requirements"
)
pyproject_build_min_versions[package] = version
assert sorted(pyproject_build_min_versions) == sorted(expected_packages)
for package, version in pyproject_build_min_versions.items():
version = parse_version(version)
expected_min_version = parse_version(dependent_packages[package][0])
if package in skip_version_check_for:
continue
assert version == expected_min_version, f"{package} has a mismatched version"
@pytest.mark.parametrize(
"min_dependencies_tag, pyproject_section",
min_dependencies_tag_to_pyproject_section.items(),
)
def test_min_dependencies_pyproject_toml(pyproject_section, min_dependencies_tag):
"""Check versions in pyproject.toml is consistent with _min_dependencies."""
# NumPy is more complex because build-time (>=1.25) and run-time (>=1.19.5)
# requirement currently don't match
skip_version_check_for = ["numpy"] if min_dependencies_tag == "build" else None
check_pyproject_section(
pyproject_section,
min_dependencies_tag,
skip_version_check_for=skip_version_check_for,
)
|
"""Prompts for scoring the outputs of a models for a given question.
This prompt is used to score the responses and evaluate how it follows the instructions
and answers the question. The prompt is based on the paper from
Zheng, et. al. https://arxiv.org/abs/2306.05685
"""
from langchain_core.prompts.chat import ChatPromptTemplate
SYSTEM_MESSAGE = "You are a helpful assistant."
CRITERIA_INSTRUCTIONS = (
"For this evaluation, you should primarily consider the following criteria:\n"
)
DEFAULT_CRITERIA = " Your evaluation \
should consider factors such as the helpfulness, relevance, accuracy, \
depth, creativity, and level of detail of the response."
SCORING_TEMPLATE = ChatPromptTemplate.from_messages(
[
("system", SYSTEM_MESSAGE),
(
"human",
'[Instruction]\nPlease act as an impartial judge \
and evaluate the quality of the response provided by an AI \
assistant to the user question displayed below. {criteria}Begin your evaluation \
by providing a short explanation. Be as objective as possible. \
After providing your explanation, you must rate the response on a scale of 1 to 10 \
by strictly following this format: "[[rating]]", for example: "Rating: [[5]]".\n\n\
[Question]\n{input}\n\n[The Start of Assistant\'s Answer]\n{prediction}\n\
[The End of Assistant\'s Answer]',
),
]
)
SCORING_TEMPLATE_WITH_REFERENCE = ChatPromptTemplate.from_messages(
[
("system", SYSTEM_MESSAGE),
(
"human",
"[Instruction]\nPlease act as an impartial judge \
and evaluate the quality of the response provided by an AI \
assistant to the user question displayed below. {criteria}"
'[Ground truth]\n{reference}\nBegin your evaluation \
by providing a short explanation. Be as objective as possible. \
After providing your explanation, you must rate the response on a scale of 1 to 10 \
by strictly following this format: "[[rating]]", for example: "Rating: [[5]]".\n\n\
[Question]\n{input}\n\n[The Start of Assistant\'s Answer]\n{prediction}\n\
[The End of Assistant\'s Answer]',
),
]
)
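# Minimal usage sketch (not part of the original module; the question and answer
# below are illustrative placeholders). The template expects the `criteria`,
# `input`, and `prediction` variables, plus `reference` for the reference-based
# variant:
#
# messages = SCORING_TEMPLATE.format_messages(
#     criteria=CRITERIA_INSTRUCTIONS + DEFAULT_CRITERIA + "\n",
#     input="What is 2 + 2?",
#     prediction="2 + 2 equals 4.",
# )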
|
"""Prompts for scoring the outputs of a models for a given question.
This prompt is used to score the responses and evaluate how it follows the instructions
and answers the question. The prompt is based on the paper from
Zheng, et. al. https://arxiv.org/abs/2306.05685
"""
# flake8: noqa
from langchain_core.prompts.chat import ChatPromptTemplate
SYSTEM_MESSAGE = "You are a helpful assistant."
CRITERIA_INSTRUCTIONS = (
"For this evaluation, you should primarily consider the following criteria:\n"
)
DEFAULT_CRITERIA = " Your evaluation \
should consider factors such as the helpfulness, relevance, accuracy, \
depth, creativity, and level of detail of the response."
SCORING_TEMPLATE = ChatPromptTemplate.from_messages(
[
("system", SYSTEM_MESSAGE),
(
"human",
'[Instruction]\nPlease act as an impartial judge \
and evaluate the quality of the response provided by an AI \
assistant to the user question displayed below. {criteria}Begin your evaluation \
by providing a short explanation. Be as objective as possible. \
After providing your explanation, you must rate the response on a scale of 1 to 10 \
by strictly following this format: "[[rating]]", for example: "Rating: [[5]]".\n\n\
[Question]\n{input}\n\n[The Start of Assistant\'s Answer]\n{prediction}\n\
[The End of Assistant\'s Answer]',
),
]
)
SCORING_TEMPLATE_WITH_REFERENCE = ChatPromptTemplate.from_messages(
[
("system", SYSTEM_MESSAGE),
(
"human",
"[Instruction]\nPlease act as an impartial judge \
and evaluate the quality of the response provided by an AI \
assistant to the user question displayed below. {criteria}"
'[Ground truth]\n{reference}\nBegin your evaluation \
by providing a short explanation. Be as objective as possible. \
After providing your explanation, you must rate the response on a scale of 1 to 10 \
by strictly following this format: "[[rating]]", for example: "Rating: [[5]]".\n\n\
[Question]\n{input}\n\n[The Start of Assistant\'s Answer]\n{prediction}\n\
[The End of Assistant\'s Answer]',
),
]
)
|
"""
Audio Datasets
==============
**Author**: `Moto Hira <moto@meta.com>`__
``torchaudio`` provides easy access to common, publicly accessible
datasets. Please refer to the official documentation for the list of
available datasets.
"""
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
#
import os
import IPython
import matplotlib.pyplot as plt
_SAMPLE_DIR = "_assets"
YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no")
os.makedirs(YESNO_DATASET_PATH, exist_ok=True)
def plot_specgram(waveform, sample_rate, title="Spectrogram"):
waveform = waveform.numpy()
figure, ax = plt.subplots()
ax.specgram(waveform[0], Fs=sample_rate)
figure.suptitle(title)
figure.tight_layout()
######################################################################
# Here, we show how to use the
# :py:class:`torchaudio.datasets.YESNO` dataset.
#
dataset = torchaudio.datasets.YESNO(YESNO_DATASET_PATH, download=True)
######################################################################
#
i = 1
waveform, sample_rate, label = dataset[i]
plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
IPython.display.Audio(waveform, rate=sample_rate)
######################################################################
#
i = 3
waveform, sample_rate, label = dataset[i]
plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
IPython.display.Audio(waveform, rate=sample_rate)
######################################################################
#
i = 5
waveform, sample_rate, label = dataset[i]
plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
IPython.display.Audio(waveform, rate=sample_rate)
|
# -*- coding: utf-8 -*-
"""
Audio Datasets
==============
**Author**: `Moto Hira <moto@meta.com>`__
``torchaudio`` provides easy access to common, publicly accessible
datasets. Please refer to the official documentation for the list of
available datasets.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no")
os.makedirs(YESNO_DATASET_PATH, exist_ok=True)
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, _ = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, _ = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
######################################################################
# Here, we show how to use the
# :py:class:`torchaudio.datasets.YESNO` dataset.
#
dataset = torchaudio.datasets.YESNO(YESNO_DATASET_PATH, download=True)
for i in [1, 3, 5]:
waveform, sample_rate, label = dataset[i]
plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
play_audio(waveform, sample_rate)
|
"""Test PandasDataframeParser"""
from typing import Any
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
"steak": [9, 8, 7, 6],
}
)
parser = PandasDataFrameOutputParser(dataframe=df)
# Test Invalid Column
def test_pandas_output_parser_col_no_array() -> None:
try:
parser.parse("column:num_legs")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with invalid array (above DataFrame max index)
def test_pandas_output_parser_col_oob() -> None:
try:
parser.parse("row:10")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with array [x]
def test_pandas_output_parser_col_first_elem() -> None:
expected_output = {"chicken": 1}
actual_output = parser.parse("column:chicken[0]")
assert actual_output == expected_output
# Test Column with array [x,y,z]
def test_pandas_output_parser_col_multi_elem() -> None:
expected_output = {"chicken": pd.Series([1, 2], name="chicken", dtype="int64")}
actual_output = parser.parse("column:chicken[0, 1]")
for key in actual_output:
assert expected_output["chicken"].equals(actual_output[key])
# Test Row with invalid row entry
def test_pandas_output_parser_row_no_array() -> None:
try:
parser.parse("row:5")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid row entry
def test_pandas_output_parser_row_first() -> None:
expected_output = {"1": pd.Series({"chicken": 2, "veggies": 4, "steak": 8})}
actual_output = parser.parse("row:1")
assert actual_output["1"].equals(expected_output["1"])
# Test Row with invalid col entry
def test_pandas_output_parser_row_no_column() -> None:
try:
parser.parse("row:1[num_legs]")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid col entry
def test_pandas_output_parser_row_col_1() -> None:
expected_output = {"1": 2}
actual_output = parser.parse("row:1[chicken]")
assert actual_output == expected_output
def test_pandas_output_parser_special_ops() -> None:
actual_output = [
{"mean": 3.0},
{"median": 3.0},
{"min": 2},
{"max": 4},
{"var": 1.0},
{"std": 1.0},
{"count": 3},
{"quantile": 3.0},
]
expected_output = [
parser.parse("mean:chicken[1..3]"),
parser.parse("median:chicken[1..3]"),
parser.parse("min:chicken[1..3]"),
parser.parse("max:chicken[1..3]"),
parser.parse("var:chicken[1..3]"),
parser.parse("std:chicken[1..3]"),
parser.parse("count:chicken[1..3]"),
parser.parse("quantile:chicken[1..3]"),
]
assert actual_output == expected_output
def test_pandas_output_parser_invalid_special_op() -> None:
try:
parser.parse("riemann_sum:chicken")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
def test_pandas_output_parser_output_type() -> None:
"""Test the output type of the pandas dataframe output parser is a pandas dataframe.""" # noqa: E501
assert parser.OutputType == dict[str, Any]
|
"""Test PandasDataframeParser"""
from typing import Any
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
"steak": [9, 8, 7, 6],
}
)
parser = PandasDataFrameOutputParser(dataframe=df)
# Test Invalid Column
def test_pandas_output_parser_col_no_array() -> None:
try:
parser.parse("column:num_legs")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with invalid array (above DataFrame max index)
def test_pandas_output_parser_col_oob() -> None:
try:
parser.parse("row:10")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with array [x]
def test_pandas_output_parser_col_first_elem() -> None:
expected_output = {"chicken": 1}
actual_output = parser.parse("column:chicken[0]")
assert actual_output == expected_output
# Test Column with array [x,y,z]
def test_pandas_output_parser_col_multi_elem() -> None:
expected_output = {"chicken": pd.Series([1, 2], name="chicken", dtype="int64")}
actual_output = parser.parse("column:chicken[0, 1]")
for key in actual_output.keys():
assert expected_output["chicken"].equals(actual_output[key])
# Test Row with invalid row entry
def test_pandas_output_parser_row_no_array() -> None:
try:
parser.parse("row:5")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid row entry
def test_pandas_output_parser_row_first() -> None:
expected_output = {"1": pd.Series({"chicken": 2, "veggies": 4, "steak": 8})}
actual_output = parser.parse("row:1")
assert actual_output["1"].equals(expected_output["1"])
# Test Row with invalid col entry
def test_pandas_output_parser_row_no_column() -> None:
try:
parser.parse("row:1[num_legs]")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid col entry
def test_pandas_output_parser_row_col_1() -> None:
expected_output = {"1": 2}
actual_output = parser.parse("row:1[chicken]")
assert actual_output == expected_output
def test_pandas_output_parser_special_ops() -> None:
actual_output = [
{"mean": 3.0},
{"median": 3.0},
{"min": 2},
{"max": 4},
{"var": 1.0},
{"std": 1.0},
{"count": 3},
{"quantile": 3.0},
]
expected_output = [
parser.parse("mean:chicken[1..3]"),
parser.parse("median:chicken[1..3]"),
parser.parse("min:chicken[1..3]"),
parser.parse("max:chicken[1..3]"),
parser.parse("var:chicken[1..3]"),
parser.parse("std:chicken[1..3]"),
parser.parse("count:chicken[1..3]"),
parser.parse("quantile:chicken[1..3]"),
]
assert actual_output == expected_output
def test_pandas_output_parser_invalid_special_op() -> None:
try:
parser.parse("riemann_sum:chicken")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
def test_pandas_output_parser_output_type() -> None:
"""Test the output type of the pandas dataframe output parser is a pandas dataframe.""" # noqa: E501
assert parser.OutputType == dict[str, Any]
|
"""Test ChatDeepSeek chat model."""
from typing import Optional
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_deepseek.chat_models import ChatDeepSeek
class TestChatDeepSeek(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatDeepSeek]:
return ChatDeepSeek
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "deepseek-chat",
"temperature": 0,
}
@property
def supports_json_mode(self) -> bool:
"""(bool) whether the chat model supports JSON mode."""
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatDeepSeek(model="deepseek-reasoner")
response = chat_model.invoke("What is 3^3?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
raise ValueError()
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content_streaming() -> None:
chat_model = ChatDeepSeek(model="deepseek-reasoner")
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is 3^3?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
"""Test ChatDeepSeek chat model."""
from typing import Optional, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_deepseek.chat_models import ChatDeepSeek
class TestChatDeepSeek(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatDeepSeek]:
return ChatDeepSeek
@property
def chat_model_params(self) -> dict:
# These should be parameters used to initialize your integration for testing
return {
"model": "deepseek-chat",
"temperature": 0,
}
@property
def supports_json_mode(self) -> bool:
"""(bool) whether the chat model supports JSON mode."""
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(model, my_adder_tool)
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content() -> None:
"""Test reasoning content."""
chat_model = ChatDeepSeek(model="deepseek-reasoner")
response = chat_model.invoke("What is 3^3?")
assert response.content
assert response.additional_kwargs["reasoning_content"]
raise ValueError()
@pytest.mark.xfail(reason="Takes > 30s to run.")
def test_reasoning_content_streaming() -> None:
chat_model = ChatDeepSeek(model="deepseek-reasoner")
full: Optional[BaseMessageChunk] = None
for chunk in chat_model.stream("What is 3^3?"):
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
assert full.additional_kwargs["reasoning_content"]
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import leaky_relu
class LeakyReLUTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_leaky_relu(self):
self.run_layer_test(
leaky_relu.LeakyReLU,
init_kwargs={
"negative_slope": 1,
},
input_shape=(2, 3, 4),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_leaky_relu_correctness(self):
leaky_relu_layer = leaky_relu.LeakyReLU(negative_slope=0.5)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
result = leaky_relu_layer(input)
self.assertAllClose(result, expected_output)
def test_invalid_usage(self):
with self.assertRaisesRegex(
ValueError,
"The negative_slope value of a Leaky ReLU layer cannot be None",
):
self.run_layer_test(
leaky_relu.LeakyReLU,
init_kwargs={"negative_slope": None},
input_shape=(2, 3, 4),
supports_masking=True,
)
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import leaky_relu
class LeakyReLUTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_leaky_relu(self):
self.run_layer_test(
leaky_relu.LeakyReLU,
init_kwargs={
"negative_slope": 1,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_leaky_relu_correctness(self):
leaky_relu_layer = leaky_relu.LeakyReLU(negative_slope=0.5)
input = np.array([-10, -5, 0.0, 5, 10])
expected_output = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
result = leaky_relu_layer(input)
self.assertAllClose(result, expected_output)
def test_invalid_usage(self):
with self.assertRaisesRegex(
ValueError,
"The negative_slope value of a Leaky ReLU layer cannot be None",
):
self.run_layer_test(
leaky_relu.LeakyReLU,
init_kwargs={"negative_slope": None},
input_shape=(2, 3, 4),
supports_masking=True,
)
|
import numpy as np
from mmdet.core.evaluation.mean_ap import (eval_map, tpfp_default,
tpfp_imagenet, tpfp_openimages)
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
def test_tpfp_imagenet():
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_tpfp_default():
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_eval_map():
# 2 image and 2 classes
det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
labels = np.array([0, 1, 1])
labels_ignore = np.array([0, 1])
gt_info = {
'bboxes': gt_bboxes,
'bboxes_ignore': gt_ignore,
'labels': labels,
'labels_ignore': labels_ignore
}
annotations = [gt_info, gt_info]
mean_ap, eval_results = eval_map(
det_results, annotations, use_legacy_coordinate=True)
assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293
def test_tpfp_openimages():
det_bboxes = np.array([[10, 10, 15, 15, 1.0], [15, 15, 30, 30, 0.98],
[10, 10, 25, 25, 0.98], [28, 28, 35, 35, 0.97],
[30, 30, 51, 51, 0.96], [100, 110, 120, 130, 0.15]])
gt_bboxes = np.array([[10., 10., 30., 30.], [30., 30., 50., 50.]])
    gt_groups_of = np.array([True, False], dtype=bool)
gt_ignore = np.zeros((0, 4))
# Open Images evaluation using group of.
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 4)
assert fp.shape == (1, 4)
assert cls_dets.shape == (4, 5)
assert (tp == np.array([[0, 1, 0, 1]])).all()
assert (fp == np.array([[1, 0, 1, 0]])).all()
cls_dets_gt = np.array([[28., 28., 35., 35., 0.97],
[30., 30., 51., 51., 0.96],
[100., 110., 120., 130., 0.15],
[10., 10., 15., 15., 1.]])
assert (cls_dets == cls_dets_gt).all()
# Open Images evaluation not using group of.
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=False,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 6)
assert fp.shape == (1, 6)
assert cls_dets.shape == (6, 5)
# Open Images evaluation using group of, and gt is all group of bboxes.
    gt_groups_of = np.array([True, True], dtype=bool)
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert cls_dets.shape == (3, 5)
# Open Images evaluation with empty gt.
gt_bboxes = np.zeros((0, 4))
gt_groups_of = np.empty((0))
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
fp = result[1]
assert (fp == np.array([[1, 1, 1, 1, 1, 1]])).all()
|
import numpy as np
from mmdet.core.evaluation.mean_ap import eval_map, tpfp_default, tpfp_imagenet
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
def test_tpfp_imagenet():
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_tpfp_default():
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_eval_map():
# 2 image and 2 classes
det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
labels = np.array([0, 1, 1])
labels_ignore = np.array([0, 1])
gt_info = {
'bboxes': gt_bboxes,
'bboxes_ignore': gt_ignore,
'labels': labels,
'labels_ignore': labels_ignore
}
annotations = [gt_info, gt_info]
mean_ap, eval_results = eval_map(
det_results, annotations, use_legacy_coordinate=True)
assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, List, Optional
import numpy as np
from annoy import AnnoyIndex
from jina import Document, DocumentArray, Executor, requests
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class AnnoySearcher(Executor):
"""Annoy powered vector indexer
For more information about the Annoy supported parameters, please consult:
- https://github.com/spotify/annoy
.. note::
        Annoy package dependency is only required at query time.
"""
def __init__(
self,
default_top_k: int = 10,
metric: str = "euclidean",
num_trees: int = 10,
dump_path: Optional[str] = None,
default_traversal_paths: List[str] = ["r"],
is_distance: bool = False,
**kwargs,
):
"""
Initialize an AnnoyIndexer
        :param default_top_k: default number of top-k vectors to return
        :param metric: Metric can be "angular", "euclidean", "manhattan", "hamming", or "dot"
        :param num_trees: builds a forest of n_trees trees. More trees give higher precision when querying.
        :param dump_path: the path to load ids and vecs
        :param default_traversal_paths: default traversal paths on docs, e.g. ['r'], ['c']
        :param is_distance: Boolean flag that describes if the distance metric needs to be reinterpreted as similarities.
:param args:
:param kwargs:
"""
super().__init__(**kwargs)
self.default_top_k = default_top_k
self.metric = metric
self.num_trees = num_trees
self.default_traversal_paths = default_traversal_paths
self.is_distance = is_distance
self.logger = get_logger(self)
self._doc_id_to_offset = {}
dump_path = dump_path or kwargs.get("runtime_args", {}).get("dump_path", None)
if dump_path is not None:
self.logger.info('Start building "AnnoyIndexer" from dump data')
ids, vecs = import_vectors(dump_path, str(self.runtime_args.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
num_dim = self._vecs.shape[1]
self._indexer = AnnoyIndex(num_dim, self.metric)
self._load_index(self._ids, self._vecs)
self.logger.info("Done building Annoy index")
else:
self.logger.warning(
'No data loaded in "AnnoyIndexer". Use .rolling_update() to re-initialize it...'
)
def _load_index(self, ids, vecs):
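        # Annoy items are addressed by integer offsets, so keep a mapping from
        # document id to its offset for later lookups in fill_embedding.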
for idx, v in enumerate(vecs):
self._indexer.add_item(idx, v.astype(np.float32))
self._doc_id_to_offset[ids[idx]] = idx
self._indexer.build(self.num_trees)
@requests(on="/search")
def search(self, docs: DocumentArray, parameters: Dict, **kwargs):
if not hasattr(self, "_indexer"):
self.logger.warning("Querying against an empty index")
return
traversal_paths = parameters.get(
"traversal_paths", self.default_traversal_paths
)
top_k = parameters.get("top_k", self.default_top_k)
for doc in docs.traverse_flat(traversal_paths):
indices, dists = self._indexer.get_nns_by_vector(
doc.embedding, top_k, include_distances=True
)
for idx, dist in zip(indices, dists):
match = Document(id=self._ids[idx], embedding=self._vecs[idx])
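                # Annoy returns raw distances; depending on `is_distance`, either report
                # them as distances or re-interpret them as similarities (higher is better).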
if self.is_distance:
if self.metric == "dot":
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = dist
else:
if self.metric == "dot":
match.scores[self.metric] = dist
elif self.metric == "angular" or self.metric == "hamming":
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = 1 / (1 + dist)
doc.matches.append(match)
@requests(on="/fill_embedding")
def fill_embedding(self, query_da: DocumentArray, **kwargs):
for doc in query_da:
doc_idx = self._doc_id_to_offset.get(doc.id)
if doc_idx is not None:
doc.embedding = np.array(self._indexer.get_item_vector(int(doc_idx)))
else:
self.logger.warning(f"Document {doc.id} not found in index")
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Union, Dict
import numpy as np
from annoy import AnnoyIndex
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class AnnoySearcher(Executor):
"""Annoy powered vector indexer
For more information about the Annoy supported parameters, please consult:
- https://github.com/spotify/annoy
.. note::
        Annoy package dependency is only required at query time.
"""
def __init__(
self,
default_top_k: int = 10,
metric: str = 'euclidean',
num_trees: int = 10,
dump_path: Optional[str] = None,
default_traversal_paths: List[str] = ['r'],
is_distance: bool = False,
**kwargs,
):
"""
Initialize an AnnoyIndexer
        :param default_top_k: default number of top-k vectors to return
        :param metric: Metric can be "angular", "euclidean", "manhattan", "hamming", or "dot"
        :param num_trees: builds a forest of n_trees trees. More trees give higher precision when querying.
        :param dump_path: the path to load ids and vecs
        :param default_traversal_paths: default traversal paths on docs, e.g. ['r'], ['c']
        :param is_distance: Boolean flag that describes if the distance metric needs to be reinterpreted as similarities.
:param args:
:param kwargs:
"""
super().__init__(**kwargs)
self.default_top_k = default_top_k
self.metric = metric
self.num_trees = num_trees
self.default_traversal_paths = default_traversal_paths
self.is_distance = is_distance
self.logger = get_logger(self)
self._doc_id_to_offset = {}
dump_path = dump_path or kwargs.get('runtime_args', {}).get('dump_path', None)
if dump_path is not None:
self.logger.info('Start building "AnnoyIndexer" from dump data')
ids, vecs = import_vectors(dump_path, str(self.runtime_args.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
num_dim = self._vecs.shape[1]
self._indexer = AnnoyIndex(num_dim, self.metric)
self._load_index(self._ids, self._vecs)
self.logger.info('Done building Annoy index')
else:
self.logger.warning(
'No data loaded in "AnnoyIndexer". Use .rolling_update() to re-initialize it...'
)
def _load_index(self, ids, vecs):
for idx, v in enumerate(vecs):
self._indexer.add_item(idx, v.astype(np.float32))
self._doc_id_to_offset[ids[idx]] = idx
self._indexer.build(self.num_trees)
@requests(on='/search')
def search(self, docs: DocumentArray, parameters: Dict, **kwargs):
if not hasattr(self, '_indexer'):
self.logger.warning('Querying against an empty index')
return
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
top_k = parameters.get('top_k', self.default_top_k)
for doc in docs.traverse_flat(traversal_paths):
indices, dists = self._indexer.get_nns_by_vector(
doc.embedding, top_k, include_distances=True
)
for idx, dist in zip(indices, dists):
match = Document(id=self._ids[idx], embedding=self._vecs[idx])
if self.is_distance:
if self.metric == 'dot':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = dist
else:
if self.metric == 'dot':
match.scores[self.metric] = dist
elif self.metric == 'angular' or self.metric == 'hamming':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = 1 / (1 + dist)
doc.matches.append(match)
@requests(on='/fill_embedding')
def fill_embedding(self, query_da: DocumentArray, **kwargs):
for doc in query_da:
doc_idx = self._doc_id_to_offset.get(doc.id)
if doc_idx is not None:
doc.embedding = np.array(self._indexer.get_item_vector(int(doc_idx)))
else:
self.logger.warning(f'Document {doc.id} not found in index')
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import setup_cache_size_limit_of_dynamo
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
        help='If a checkpoint path is specified, resume from it, while if not '
        'specified, try to auto resume from the latest checkpoint '
        'in the work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# Reduce the number of repeated compilations and improve
# training speed.
setup_cache_size_limit_of_dynamo()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
        help='If a checkpoint path is specified, resume from it, while if not '
        'specified, try to auto resume from the latest checkpoint '
        'in the work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
__all__ = [
"AsyncCallbackHandler",
"BaseCallbackHandler",
"BaseCallbackManager",
"CallbackManagerMixin",
"ChainManagerMixin",
"LLMManagerMixin",
"RetrieverManagerMixin",
"RunManagerMixin",
"ToolManagerMixin",
]
|
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
__all__ = [
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.fixture(scope='session')
def basic_encoder() -> SpacyTextEncoder:
return SpacyTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'SpacyTextEncoder'
def test_encoding_cpu():
enc = SpacyTextEncoder(require_gpu=False)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = SpacyTextEncoder(require_gpu=True)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'model_name, emb_dim',
[
('en_core_web_sm', 96),
('en_core_web_lg', 300),
('es_core_news_sm', 96),
],
)
def test_models(model_name: str, emb_dim: int):
encoder = SpacyTextEncoder(model_name)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (emb_dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: SpacyTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: SpacyTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: SpacyTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.fixture(scope='session')
def basic_encoder() -> SpacyTextEncoder:
return SpacyTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.__class__.__name__ == 'SpacyTextEncoder'
def test_encoding_cpu():
enc = SpacyTextEncoder(require_gpu=False)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = SpacyTextEncoder(require_gpu=True)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'model_name, emb_dim',
[
('en_core_web_sm', 96),
('en_core_web_lg', 300),
('es_core_news_sm', 96),
],
)
def test_models(model_name: str, emb_dim: int):
encoder = SpacyTextEncoder(model_name)
input_data = DocumentArray([Document(text='hello world')])
encoder.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (emb_dim,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: SpacyTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: SpacyTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: SpacyTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.3.2"
@keras_export("keras.version")
def version():
return __version__
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.3.1"
@keras_export("keras.version")
def version():
return __version__
|
"""
This script runs the evaluation (dev & test) for the AskUbuntu dataset
Usage:
python eval_askubuntu.py [sbert_model_name_or_path]
"""
import gzip
import logging
import os
import sys
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
model = SentenceTransformer(sys.argv[1])
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
training_corpus = os.path.join(askubuntu_folder, "train.unsupervised.txt")
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
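    # Each line: query_id \t relevant_ids \t candidate_ids \t bm25_scores,
    # where the id fields are space-separated lists of question ids.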
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
logging.info("Dev performance before training")
dev_evaluator(model)
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Test performance before training")
test_evaluator(model)
|
"""
This script runs the evaluation (dev & test) for the AskUbuntu dataset
Usage:
python eval_askubuntu.py [sbert_model_name_or_path]
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import util, evaluation
import logging
import os
import gzip
import sys
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
model = SentenceTransformer(sys.argv[1])
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
training_corpus = os.path.join(askubuntu_folder, "train.unsupervised.txt")
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
logging.info("Dev performance before training")
dev_evaluator(model)
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Test performance before training")
test_evaluator(model)
|
import gc
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, CompressedTensorsConfig
from transformers.testing_utils import backend_empty_cache, require_compressed_tensors, require_torch, torch_device
from transformers.utils import is_torch_available
if is_torch_available():
import torch
@require_compressed_tensors
@require_torch
class CompressedTensorsTest(unittest.TestCase):
tinyllama_w8a16 = "nm-testing/tinyllama-w8a16-dense-hf-quantizer"
tinyllama_w4a16 = "nm-testing/tinyllama-w4a16-compressed-hf-quantizer"
tinyllama_w8a8 = "nm-testing/tinyllama-w8a8-compressed-hf-quantizer"
llama3_8b_fp8 = "nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat"
prompt = "Paris is the capital of which country?"
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_config_args(self):
with self.assertRaises(ValueError):
# passing quant scheme directly is not allowed
CompressedTensorsConfig(config_groups={"weights": {"num_bits": 8}})
CompressedTensorsConfig(
config_groups={"FP8": ["Linear"]},
ignore=["lm_head"],
quantization_status="frozen",
sparsity_config={"format": "dense"},
)
def test_config_to_from_dict(self):
config = CompressedTensorsConfig(config_groups={"FP8": ["Linear"]}, sparsity_config={"format": "dense"})
config_dict = config.to_dict()
config_from_dict = CompressedTensorsConfig.from_dict(config_dict)
from compressed_tensors import QuantizationConfig, SparsityCompressionConfig
self.assertIsInstance(config_from_dict.quantization_config, QuantizationConfig)
self.assertIsInstance(config_from_dict.sparsity_config, SparsityCompressionConfig)
def test_tinyllama_w8a8(self):
expected_out = "<s> Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n"
self._test_quantized_model(self.tinyllama_w8a8, expected_out)
def test_tinyllama_w4a16(self):
expected_out = "<s> Paris is the capital of which country?\nAnswer: Paris is the capital of France.\nQuestion: Which country is the capital of which city?\nAnswer: The capital of the city of New York is New York.\nQuestion: Which"
self._test_quantized_model(self.tinyllama_w4a16, expected_out)
def test_tinyllama_w8a16(self):
expected_out = "<s> Paris is the capital of which country?\nA. France\nB. Germany\nC. Spain\nD. Italy\nE. Switzerland\nQ10. Which of the following is not a country in the European Union?\nA."
self._test_quantized_model(self.tinyllama_w8a16, expected_out)
def test_llama_8b_fp8(self):
expected_out = "<|begin_of_text|>Paris is the capital of which country? France\nWhat is the name of the famous museum in Paris that is home to the Mona Lisa? The Louvre\nWhat is the name of the famous bridge in Paris that is often associated with the city"
self._test_quantized_model(self.llama3_8b_fp8, expected_out)
def _test_quantized_model(self, model_name: str, expected_output: str):
"""Carry out generation"""
quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
device = quantized_model.device
self.assertIsNotNone(
quantized_model.config.quantization_config,
"quantization_config should not be None",
)
self.assertTrue(
any(
key
for key, tensor in quantized_model.state_dict().items()
if "scale" in key and not torch.all(tensor == 1.0)
),
"quantized model should load a non-trivial scale into the state dict",
)
inputs = tokenizer(self.prompt, return_tensors="pt").to(device)
generated_ids = quantized_model.generate(**inputs, max_length=50, do_sample=False)
outputs = tokenizer.batch_decode(generated_ids)
self.assertIsNotNone(outputs)
self.assertEqual(outputs[0], expected_output)
|
import gc
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, CompressedTensorsConfig
from transformers.testing_utils import require_compressed_tensors, require_torch
from transformers.utils import is_torch_available
if is_torch_available():
import torch
@require_compressed_tensors
@require_torch
class CompressedTensorsTest(unittest.TestCase):
tinyllama_w8a16 = "nm-testing/tinyllama-w8a16-dense-hf-quantizer"
tinyllama_w4a16 = "nm-testing/tinyllama-w4a16-compressed-hf-quantizer"
tinyllama_w8a8 = "nm-testing/tinyllama-w8a8-compressed-hf-quantizer"
llama3_8b_fp8 = "nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat"
prompt = "Paris is the capital of which country?"
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
gc.collect()
def test_config_args(self):
with self.assertRaises(ValueError):
# passing quant scheme directly is not allowed
CompressedTensorsConfig(config_groups={"weights": {"num_bits": 8}})
CompressedTensorsConfig(
config_groups={"FP8": ["Linear"]},
ignore=["lm_head"],
quantization_status="frozen",
sparsity_config={"format": "dense"},
)
def test_config_to_from_dict(self):
config = CompressedTensorsConfig(config_groups={"FP8": ["Linear"]}, sparsity_config={"format": "dense"})
config_dict = config.to_dict()
config_from_dict = CompressedTensorsConfig.from_dict(config_dict)
from compressed_tensors import QuantizationConfig, SparsityCompressionConfig
self.assertIsInstance(config_from_dict.quantization_config, QuantizationConfig)
self.assertIsInstance(config_from_dict.sparsity_config, SparsityCompressionConfig)
def test_tinyllama_w8a8(self):
expected_out = "<s> Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n"
self._test_quantized_model(self.tinyllama_w8a8, expected_out)
def test_tinyllama_w4a16(self):
expected_out = "<s> Paris is the capital of which country?\nAnswer: Paris is the capital of France.\nQuestion: Which country is the capital of which city?\nAnswer: The capital of the city of New York is New York.\nQuestion: Which"
self._test_quantized_model(self.tinyllama_w4a16, expected_out)
def test_tinyllama_w8a16(self):
expected_out = "<s> Paris is the capital of which country?\nA. France\nB. Germany\nC. Spain\nD. Italy\nE. Switzerland\nQ10. Which of the following is not a country in the European Union?\nA."
self._test_quantized_model(self.tinyllama_w8a16, expected_out)
def test_llama_8b_fp8(self):
expected_out = "<|begin_of_text|>Paris is the capital of which country? France\nWhat is the name of the famous museum in Paris that is home to the Mona Lisa? The Louvre\nWhat is the name of the famous bridge in Paris that is often associated with the city"
self._test_quantized_model(self.llama3_8b_fp8, expected_out)
def _test_quantized_model(self, model_name: str, expected_output: str):
"""Carry out generation"""
quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
device = quantized_model.device
self.assertIsNotNone(
quantized_model.config.quantization_config,
"quantization_config should not be None",
)
self.assertTrue(
any(
key
for key, tensor in quantized_model.state_dict().items()
if "scale" in key and not torch.all(tensor == 1.0)
),
"quantized model should load a non-trivial scale into the state dict",
)
inputs = tokenizer(self.prompt, return_tensors="pt").to(device)
generated_ids = quantized_model.generate(**inputs, max_length=50, do_sample=False)
outputs = tokenizer.batch_decode(generated_ids)
self.assertIsNotNone(outputs)
self.assertEqual(outputs[0], expected_output)
|
"""
Copyright (c) 2013, Triad National Security, LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Triad National Security, LLC nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from typing import Optional
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class YelpToolSpec(BaseToolSpec):
"""Yelp tool spec."""
# TODO add disclaimer
spec_functions = ["business_search", "business_reviews"]
    def __init__(self, api_key: str, client_id: str) -> None:
"""Initialize with parameters."""
from yelpapi import YelpAPI
self.client = YelpAPI(api_key)
def business_search(self, location: str, term: str, radius: Optional[int] = None):
"""
Make a query to Yelp to find businesses given a location to search.
Args:
            location (str): The location to search for businesses. Businesses returned in the response may not be strictly within the specified location.
            term (str): Search term, e.g. "food" or "restaurants". The term may also be the business's name, such as "Starbucks"
radius (int): A suggested search radius in meters. This field is used as a suggestion to the search. The actual search radius may be lower than the suggested radius in dense urban areas, and higher in regions of less business density.
"""
response = self.client.search_query(location=location, term=term)
return [Document(text=str(response))]
def business_reviews(self, id: str):
"""
Make a query to Yelp to find a business using an id from business_search.
Args:
            id (str): The business id returned from business_search.
"""
response = self.client.reviews_query(id=id)
return [Document(text=str(response))]
|
"""
Copyright (c) 2013, Triad National Security, LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Triad National Security, LLC nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from typing import Optional
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class YelpToolSpec(BaseToolSpec):
"""Yelp tool spec."""
# TODO add disclaimer
spec_functions = ["business_search", "business_reviews"]
    def __init__(self, api_key: str, client_id: str) -> None:
"""Initialize with parameters."""
from yelpapi import YelpAPI
self.client = YelpAPI(api_key)
def business_search(self, location: str, term: str, radius: Optional[int] = None):
"""
Make a query to Yelp to find businesses given a location to search.
Args:
            location (str): The location to search for businesses. Businesses returned in the response may not be strictly within the specified location.
            term (str): Search term, e.g. "food" or "restaurants". The term may also be the business's name, such as "Starbucks"
radius (int): A suggested search radius in meters. This field is used as a suggestion to the search. The actual search radius may be lower than the suggested radius in dense urban areas, and higher in regions of less business density.
"""
response = self.client.search_query(location=location, term=term)
return [Document(text=str(response))]
def business_reviews(self, id: str):
"""
        Make a query to Yelp to find a business using an id from business_search.
Args:
            id (str): The business id returned from business_search.
"""
response = self.client.reviews_query(id=id)
return [Document(text=str(response))]
|
from __future__ import annotations
from typing import List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.azure_ai_services import (
AzureAiServicesDocumentIntelligenceTool,
AzureAiServicesImageAnalysisTool,
AzureAiServicesSpeechToTextTool,
AzureAiServicesTextAnalyticsForHealthTool,
AzureAiServicesTextToSpeechTool,
)
class AzureAiServicesToolkit(BaseToolkit):
"""Toolkit for Azure AI Services."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = [
AzureAiServicesDocumentIntelligenceTool(), # type: ignore[call-arg]
AzureAiServicesImageAnalysisTool(),
AzureAiServicesSpeechToTextTool(), # type: ignore[call-arg]
AzureAiServicesTextToSpeechTool(), # type: ignore[call-arg]
AzureAiServicesTextAnalyticsForHealthTool(), # type: ignore[call-arg]
]
return tools
|
from __future__ import annotations
from typing import List
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_community.tools.azure_ai_services import (
AzureAiServicesDocumentIntelligenceTool,
AzureAiServicesImageAnalysisTool,
AzureAiServicesSpeechToTextTool,
AzureAiServicesTextAnalyticsForHealthTool,
AzureAiServicesTextToSpeechTool,
)
class AzureAiServicesToolkit(BaseToolkit):
"""Toolkit for Azure AI Services."""
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = [
AzureAiServicesDocumentIntelligenceTool(), # type: ignore[call-arg]
AzureAiServicesImageAnalysisTool(), # type: ignore[call-arg]
AzureAiServicesSpeechToTextTool(), # type: ignore[call-arg]
AzureAiServicesTextToSpeechTool(), # type: ignore[call-arg]
AzureAiServicesTextAnalyticsForHealthTool(), # type: ignore[call-arg]
]
return tools
|
# coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def is_sphinx_build() -> bool:
"""`XGBOOST_BUILD_DOC` is used by the sphinx conf.py to skip building the C++ code."""
return bool(os.environ.get("XGBOOST_BUILD_DOC", False))
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
Returns
-------
lib_path
        List of all found library paths to xgboost
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
dll_path = [
# normal, after installation `lib` is copied into Python package tree.
os.path.join(curr_path, "lib"),
# editable installation, no copying is performed.
os.path.join(curr_path, os.path.pardir, os.path.pardir, "lib"),
# use libxgboost from a system prefix, if available. This should be the last
# option.
os.path.join(sys.base_prefix, "lib"),
]
if sys.platform == "win32":
# On Windows, Conda may install libs in different paths
dll_path.extend(
[
os.path.join(sys.base_prefix, "bin"),
os.path.join(sys.base_prefix, "Library"),
os.path.join(sys.base_prefix, "Library", "bin"),
os.path.join(sys.base_prefix, "Library", "lib"),
os.path.join(sys.base_prefix, "Library", "mingw-w64"),
os.path.join(sys.base_prefix, "Library", "mingw-w64", "bin"),
os.path.join(sys.base_prefix, "Library", "mingw-w64", "lib"),
]
)
dll_path = [os.path.join(p, "xgboost.dll") for p in dll_path]
elif sys.platform.startswith(("linux", "freebsd", "emscripten")):
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
elif sys.platform == "darwin":
dll_path = [os.path.join(p, "libxgboost.dylib") for p in dll_path]
elif sys.platform == "cygwin":
dll_path = [os.path.join(p, "cygxgboost.dll") for p in dll_path]
if platform.system() == "OS400":
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
if not lib_path and not is_sphinx_build():
link = "https://xgboost.readthedocs.io/en/stable/install.html"
msg = (
"Cannot find XGBoost Library in the candidate path. "
+ "List of candidates:\n- "
+ ("\n- ".join(dll_path))
+ "\nXGBoost Python package path: "
+ curr_path
+ "\nsys.base_prefix: "
+ sys.base_prefix
+ "\nSee: "
+ link
+ " for installing XGBoost."
)
raise XGBoostLibraryNotFound(msg)
return lib_path
|
# coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
Returns
-------
lib_path
        List of all found library paths to xgboost
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
dll_path = [
# normal, after installation `lib` is copied into Python package tree.
os.path.join(curr_path, "lib"),
# editable installation, no copying is performed.
os.path.join(curr_path, os.path.pardir, os.path.pardir, "lib"),
# use libxgboost from a system prefix, if available. This should be the last
# option.
os.path.join(sys.base_prefix, "lib"),
]
if sys.platform == "win32":
# On Windows, Conda may install libs in different paths
dll_path.extend(
[
os.path.join(sys.base_prefix, "bin"),
os.path.join(sys.base_prefix, "Library"),
os.path.join(sys.base_prefix, "Library", "bin"),
os.path.join(sys.base_prefix, "Library", "lib"),
os.path.join(sys.base_prefix, "Library", "mingw-w64"),
os.path.join(sys.base_prefix, "Library", "mingw-w64", "bin"),
os.path.join(sys.base_prefix, "Library", "mingw-w64", "lib"),
]
)
dll_path = [os.path.join(p, "xgboost.dll") for p in dll_path]
elif sys.platform.startswith(("linux", "freebsd", "emscripten")):
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
elif sys.platform == "darwin":
dll_path = [os.path.join(p, "libxgboost.dylib") for p in dll_path]
elif sys.platform == "cygwin":
dll_path = [os.path.join(p, "cygxgboost.dll") for p in dll_path]
if platform.system() == "OS400":
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
# XGBOOST_BUILD_DOC is defined by sphinx conf.
if not lib_path and not os.environ.get("XGBOOST_BUILD_DOC", False):
link = "https://xgboost.readthedocs.io/en/stable/install.html"
msg = (
"Cannot find XGBoost Library in the candidate path. "
+ "List of candidates:\n- "
+ ("\n- ".join(dll_path))
+ "\nXGBoost Python package path: "
+ curr_path
+ "\nsys.base_prefix: "
+ sys.base_prefix
+ "\nSee: "
+ link
+ " for installing XGBoost."
)
raise XGBoostLibraryNotFound(msg)
return lib_path
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.audio import AudioTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), AudioTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), AudioNdArray, np.ndarray),
],
)
def test_set_audio_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyAudioDoc(BaseDoc):
tensor: cls_audio_tensor
doc = MyAudioDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_audio_tensorflow_tensor():
class MyAudioDoc(BaseDoc):
tensor: AudioTensorFlowTensor
doc = MyAudioDoc(tensor=tf.zeros((1000, 2)))
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1000, 2)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, np.zeros((1000, 2))),
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioTorchTensor, np.zeros((1000, 2))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
assert isinstance(arr, AudioTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, torch.zeros(1000, 2)),
(AudioNdArray, 'hello'),
(AudioTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
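    # Derive the class name (e.g. 'AudioNdArray') from the type's repr so it can be
    # matched against the raised validation error message.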
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.proto
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(AudioTorchTensor, torch.zeros(1000, 2), AudioTorchTensor._proto_type_name),
(AudioNdArray, np.zeros((1000, 2)), AudioNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
proto = tensor._to_node_protobuf()
assert AudioTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_wav_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(cls_tensor, tensor)
audio_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_audio_tensorflow_tensor_to_wav_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
audio_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
|
import os
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
from docarray.typing.tensor.audio import AudioTensorFlowTensor
@pytest.mark.parametrize(
'tensor,cls_audio_tensor,cls_tensor',
[
(torch.zeros(1000, 2), AudioTorchTensor, torch.Tensor),
(np.zeros((1000, 2)), AudioNdArray, np.ndarray),
],
)
def test_set_audio_tensor(tensor, cls_audio_tensor, cls_tensor):
class MyAudioDoc(BaseDoc):
tensor: cls_audio_tensor
doc = MyAudioDoc(tensor=tensor)
assert isinstance(doc.tensor, cls_audio_tensor)
assert isinstance(doc.tensor, cls_tensor)
assert (doc.tensor == tensor).all()
@pytest.mark.tensorflow
def test_set_audio_tensorflow_tensor():
class MyAudioDoc(BaseDoc):
tensor: AudioTensorFlowTensor
doc = MyAudioDoc(tensor=tf.zeros((1000, 2)))
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
assert tnp.allclose(doc.tensor.tensor, tf.zeros((1000, 2)))
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, np.zeros((1000, 2))),
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioTorchTensor, np.zeros((1000, 2))),
],
)
def test_validation(cls_tensor, tensor):
arr = parse_obj_as(cls_tensor, tensor)
assert isinstance(arr, cls_tensor)
@pytest.mark.tensorflow
def test_validation_tensorflow():
arr = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
assert isinstance(arr, AudioTensorFlowTensor)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioNdArray, torch.zeros(1000, 2)),
(AudioNdArray, 'hello'),
(AudioTorchTensor, 'hello'),
],
)
def test_illegal_validation(cls_tensor, tensor):
match = str(cls_tensor).split('.')[-1][:-2]
with pytest.raises(ValueError, match=match):
parse_obj_as(cls_tensor, tensor)
@pytest.mark.proto
@pytest.mark.parametrize(
'cls_tensor,tensor,proto_key',
[
(AudioTorchTensor, torch.zeros(1000, 2), AudioTorchTensor._proto_type_name),
(AudioNdArray, np.zeros((1000, 2)), AudioNdArray._proto_type_name),
],
)
def test_proto_tensor(cls_tensor, tensor, proto_key):
tensor = parse_obj_as(cls_tensor, tensor)
proto = tensor._to_node_protobuf()
assert proto_key in str(proto)
@pytest.mark.tensorflow
def test_proto_tensor_tensorflow():
tensor = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
proto = tensor._to_node_protobuf()
assert AudioTensorFlowTensor._proto_type_name in str(proto)
@pytest.mark.parametrize(
'cls_tensor,tensor',
[
(AudioTorchTensor, torch.zeros(1000, 2)),
(AudioNdArray, np.zeros((1000, 2))),
],
)
def test_save_audio_tensor_to_wav_file(cls_tensor, tensor, tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(cls_tensor, tensor)
audio_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
@pytest.mark.tensorflow
def test_save_audio_tensorflow_tensor_to_wav_file(tmpdir):
tmp_file = str(tmpdir / 'tmp.wav')
audio_tensor = parse_obj_as(AudioTensorFlowTensor, tf.zeros((1000, 2)))
audio_tensor.save(tmp_file)
assert os.path.isfile(tmp_file)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class MeanTeacherHook(Hook):
"""Mean Teacher Hook.
Mean Teacher is an efficient semi-supervised learning method in
`Mean Teacher <https://arxiv.org/abs/1703.01780>`_.
This method requires two models with exactly the same structure,
as the student model and the teacher model, respectively.
The student model updates the parameters through gradient descent,
and the teacher model updates the parameters through
exponential moving average of the student model.
Compared with the student model, the teacher model
is smoother and accumulates more knowledge.
Args:
        momentum (float): The momentum used for updating teacher's parameters.
            Teacher's parameters are updated with the formula:
`teacher = (1-momentum) * teacher + momentum * student`.
Defaults to 0.001.
interval (int): Update teacher's parameter every interval iteration.
Defaults to 1.
        skip_buffers (bool): Whether to skip the model buffers, such as
            batchnorm running stats (running_mean, running_var), so that the
            EMA operation is not performed on them. Defaults to True.
"""
def __init__(self,
momentum: float = 0.001,
interval: int = 1,
skip_buffer=True) -> None:
assert 0 < momentum < 1
self.momentum = momentum
self.interval = interval
self.skip_buffers = skip_buffer
def before_train(self, runner: Runner) -> None:
"""To check that teacher model and student model exist."""
model = runner.model
if is_model_wrapper(model):
model = model.module
assert hasattr(model, 'teacher')
assert hasattr(model, 'student')
# only do it at initial stage
if runner.iter == 0:
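            # With momentum=1 the update copies the student weights into the teacher,
            # so both models start training from identical parameters.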
self.momentum_update(model, 1)
def after_train_iter(self,
runner: Runner,
batch_idx: int,
data_batch: Optional[dict] = None,
outputs: Optional[dict] = None) -> None:
"""Update teacher's parameter every self.interval iterations."""
if (runner.iter + 1) % self.interval != 0:
return
model = runner.model
if is_model_wrapper(model):
model = model.module
self.momentum_update(model, self.momentum)
def momentum_update(self, model: nn.Module, momentum: float) -> None:
"""Compute the moving average of the parameters using exponential
moving average."""
if self.skip_buffers:
for (src_name, src_parm), (dst_name, dst_parm) in zip(
model.student.named_parameters(),
model.teacher.named_parameters()):
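                # In-place EMA: teacher = (1 - momentum) * teacher + momentum * student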
dst_parm.data.mul_(1 - momentum).add_(
src_parm.data, alpha=momentum)
else:
for (src_parm,
dst_parm) in zip(model.student.state_dict().values(),
model.teacher.state_dict().values()):
# exclude num_tracking
if dst_parm.dtype.is_floating_point:
dst_parm.data.mul_(1 - momentum).add_(
src_parm.data, alpha=momentum)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
import torch.nn as nn
from mmengine.hooks import Hook
from mmengine.model import is_model_wrapper
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class MeanTeacherHook(Hook):
"""Mean Teacher Hook.
Mean Teacher is an efficient semi-supervised learning method in
`Mean Teacher <https://arxiv.org/abs/1703.01780>`_.
This method requires two models with exactly the same structure,
as the student model and the teacher model, respectively.
The student model updates the parameters through gradient descent,
and the teacher model updates the parameters through
exponential moving average of the student model.
Compared with the student model, the teacher model
is smoother and accumulates more knowledge.
Args:
        momentum (float): The momentum used for updating teacher's parameters.
            Teacher's parameters are updated with the formula:
`teacher = (1-momentum) * teacher + momentum * student`.
Defaults to 0.001.
interval (int): Update teacher's parameter every interval iteration.
Defaults to 1.
skip_buffers (bool): Whether to skip the model buffers, such as
batchnorm running stats (running_mean, running_var), it does not
perform the ema operation. Default to True.
"""
def __init__(self,
momentum: float = 0.001,
interval: int = 1,
skip_buffer=True) -> None:
assert 0 < momentum < 1
self.momentum = momentum
self.interval = interval
self.skip_buffers = skip_buffer
def before_train(self, runner: Runner) -> None:
"""To check that teacher model and student model exist."""
model = runner.model
if is_model_wrapper(model):
model = model.module
assert hasattr(model, 'teacher')
assert hasattr(model, 'student')
# only do it at initial stage
if runner.iter == 0:
self.momentum_update(model, 1)
def after_train_iter(self,
runner: Runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[dict] = None) -> None:
"""Update teacher's parameter every self.interval iterations."""
if (runner.iter + 1) % self.interval != 0:
return
model = runner.model
if is_model_wrapper(model):
model = model.module
self.momentum_update(model, self.momentum)
def momentum_update(self, model: nn.Module, momentum: float) -> None:
"""Compute the moving average of the parameters using exponential
moving average."""
if self.skip_buffers:
for (src_name, src_parm), (dst_name, dst_parm) in zip(
model.student.named_parameters(),
model.teacher.named_parameters()):
dst_parm.data.mul_(1 - momentum).add_(
src_parm.data, alpha=momentum)
else:
for (src_parm,
dst_parm) in zip(model.student.state_dict().values(),
model.teacher.state_dict().values()):
                # exclude non-floating-point buffers, e.g. num_batches_tracked
if dst_parm.dtype.is_floating_point:
dst_parm.data.mul_(1 - momentum).add_(
src_parm.data, alpha=momentum)
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import PISARetinaHead
class TestPISARetinaHead(TestCase):
    def test_pisa_retinanet_head_loss(self):
"""Tests pisa retinanet head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s),
'pad_shape': (s, s),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
sampler=dict(type='PseudoSampler'),
allowed_border=-1,
pos_weight=-1,
debug=False))
pisa_retinanet_head = PISARetinaHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
train_cfg=cfg)
        # The PISA retina head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in pisa_retinanet_head.prior_generator.strides)
cls_scores, bbox_preds = pisa_retinanet_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = pisa_retinanet_head.loss_by_feat(
cls_scores, bbox_preds, [gt_instances], img_metas)
        # When there is no truth, the box and carl losses should be zero,
        # while the cls loss on the background predictions stays non-zero.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_carl_loss = empty_gt_losses['loss_carl']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_carl_loss.item(), 0,
'there should be no carl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = pisa_retinanet_head.loss_by_feat(
cls_scores, bbox_preds, [gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_carl_loss = one_gt_losses['loss_carl']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_carl_loss.item(), 0,
'carl loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from math import ceil
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import PISARetinaHead
class TestPISARetinaHead(TestCase):
    def test_pisa_retinanet_head_loss(self):
"""Tests pisa retinanet head loss when truth is empty and non-empty."""
s = 300
img_metas = [{
'img_shape': (s, s),
'pad_shape': (s, s),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
sampler=dict(type='PseudoSampler'),
allowed_border=-1,
pos_weight=-1,
debug=False))
pisa_retinanet_head = PISARetinaHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
train_cfg=cfg)
        # The PISA retina head expects multiple levels of features per image
feats = (
torch.rand(1, 1, ceil(s / stride[0]), ceil(s / stride[0]))
for stride in pisa_retinanet_head.prior_generator.strides)
cls_scores, bbox_preds = pisa_retinanet_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = pisa_retinanet_head.loss_by_feat(
cls_scores, bbox_preds, [gt_instances], img_metas)
        # When there is no truth, the box and carl losses should be zero,
        # while the cls loss on the background predictions stays non-zero.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_carl_loss = empty_gt_losses['loss_carl']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_carl_loss.item(), 0,
'there should be no carl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = pisa_retinanet_head.loss_by_feat(
cls_scores, bbox_preds, [gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_carl_loss = one_gt_losses['loss_carl']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_carl_loss.item(), 0,
'carl loss should be non-zero')
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@parameterized.expand(
[
(F.convolve,),
(F.fftconvolve,),
]
)
def test_convolve(self, fn):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(fn, (x, y))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@parameterized.expand(
[
(F.convolve,),
(F.fftconvolve,),
]
)
def test_convolve(self, fn):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(fn, (x, y))
|
_base_ = './cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' # noqa
# please install mmcls>=0.22.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optim_wrapper = dict(paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
|
_base_ = './cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' # noqa
# please install mmcls>=0.22.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-small_3rdparty_32xb128-noema_in1k_20220301-303e75e3.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='small',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.6,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')))
optimizer = dict(
_delete_=True,
constructor='LearningRateDecayOptimizerConstructor',
type='AdamW',
lr=0.0002,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg={
'decay_rate': 0.7,
'decay_type': 'layer_wise',
'num_layers': 12
})
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
embeddings = model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
logger.info(model.get_sparsity_stats(embeddings))
return embeddings
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
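# Illustrative usage sketch (hypothetical ids, texts and model variable; an
# assumption rather than part of the original file):
#
#     queries = {"q1": "what is sparse retrieval?"}
#     corpus = {"d1": "Sparse retrieval scores documents with sparse lexical vectors."}
#     relevant_docs = {"q1": {"d1"}}
#     evaluator = SparseInformationRetrievalEvaluator(
#         queries, corpus, relevant_docs, name="toy-ir")
#     results = evaluator(sparse_encoder_model)  # dict of IR metrics, e.g. NDCG@10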
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import InformationRetrievalEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
def __init__(
self,
queries: dict[str, str], # qid => query
corpus: dict[str, str], # cid => doc
relevant_docs: dict[str, set[str]], # qid => Set[cid]
corpus_chunk_size: int = 50000,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
query_prompt: str | None = None,
query_prompt_name: str | None = None,
corpus_prompt: str | None = None,
corpus_prompt_name: str | None = None,
) -> None:
return super().__init__(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
corpus_chunk_size=corpus_chunk_size,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
query_prompt=query_prompt,
query_prompt_name=query_prompt_name,
corpus_prompt=corpus_prompt,
corpus_prompt_name=corpus_prompt_name,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def compute_metrices(
self, model: SparseEncoder, corpus_model=None, corpus_embeddings: Tensor | None = None
) -> dict[str, float]:
return super().compute_metrices(model=model, corpus_model=corpus_model, corpus_embeddings=corpus_embeddings)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
prompt_name: str | None = None,
prompt: str | None = None,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
prompt_name=prompt_name,
prompt=prompt,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
from tests.helper import _generate_pod_args
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf-8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf-8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
@pytest.mark.parametrize('uses', ['jinaai://jina-ai/DummyHubExecutor'])
def test_cli_hub(uses):
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', uses])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = _generate_pod_args()
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port[0]}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port[0]}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
@pytest.mark.parametrize(
'cmd',
[
['jina', 'ping', 'flow', '127.0.0.1:8080'],
['jina', 'help', 'port'],
['jina', 'hub'],
],
)
def test_logo_silence(cmd):
from jina.constants import __resources_path__
with open(os.path.join(__resources_path__, 'jina.logo'), encoding='utf-8') as fp:
logo_str = fp.read()
s = subprocess.run(
cmd,
stdout=subprocess.PIPE,
)
assert logo_str not in s.stdout.decode()
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
from tests.helper import _generate_pod_args
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf-8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf-8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
@pytest.mark.parametrize(
'uses', ['jinaai://jina-ai/DummyHubExecutor']
)
def test_cli_hub(uses):
subprocess.check_call(['jina', 'hub', '--help'])
for cmd in ['new', 'status', 'pull', 'push']:
subprocess.check_call(['jina', 'hub', cmd, '--help'])
subprocess.check_call(['jina', 'hub', 'pull', uses])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = _generate_pod_args()
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port[0]}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port[0]}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
@pytest.mark.parametrize(
'cmd',
[
['jina', 'ping', 'flow', '127.0.0.1:8080'],
['jina', 'help', 'port'],
['jina', 'hub'],
],
)
def test_logo_silence(cmd):
from jina.constants import __resources_path__
with open(os.path.join(__resources_path__, 'jina.logo'), encoding='utf-8') as fp:
logo_str = fp.read()
s = subprocess.run(
cmd,
stdout=subprocess.PIPE,
)
assert logo_str not in s.stdout.decode()
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='NASFCOS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='NASFCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
|
_base_ = './mask-rcnn_r50_fpn_instaboost-4x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .glip import GLIP
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR', 'GLIP'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .base_detr import DetectionTransformer
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .conditional_detr import ConditionalDETR
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .dab_detr import DABDETR
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .dino import DINO
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer', 'ConditionalDETR', 'DINO', 'DABDETR'
]
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_gn')),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=norm_cfg,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_gn')),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""Default query for EmptyIndex."""
from typing import Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.empty.base import EmptyIndex
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.default_prompts import DEFAULT_SIMPLE_INPUT_PROMPT
from llama_index.core.schema import NodeWithScore, QueryBundle
class EmptyIndexRetriever(BaseRetriever):
"""
EmptyIndex query.
Passes the raw LLM call to the underlying LLM model.
Args:
input_prompt (Optional[BasePromptTemplate]): A Simple Input Prompt
(see :ref:`Prompt-Templates`).
"""
def __init__(
self,
index: EmptyIndex,
input_prompt: Optional[BasePromptTemplate] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._index = index
self._input_prompt = input_prompt or DEFAULT_SIMPLE_INPUT_PROMPT
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve relevant nodes."""
del query_bundle # Unused
return []
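# Illustrative usage sketch (hypothetical index variable; an assumption rather
# than part of the original file): the retriever ignores the query and always
# returns an empty node list, so the query engine falls back to a raw LLM call.
#
#     retriever = EmptyIndexRetriever(index=empty_index)
#     nodes = retriever.retrieve("any question")  # always []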
|
"""Default query for EmptyIndex."""
from typing import Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.empty.base import EmptyIndex
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.default_prompts import DEFAULT_SIMPLE_INPUT_PROMPT
from llama_index.core.schema import NodeWithScore, QueryBundle
class EmptyIndexRetriever(BaseRetriever):
"""
EmptyIndex query.
Passes the raw LLM call to the underlying LLM model.
Args:
input_prompt (Optional[BasePromptTemplate]): A Simple Input Prompt
(see :ref:`Prompt-Templates`).
"""
def __init__(
self,
index: EmptyIndex,
input_prompt: Optional[BasePromptTemplate] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
self._index = index
self._input_prompt = input_prompt or DEFAULT_SIMPLE_INPUT_PROMPT
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve relevant nodes."""
del query_bundle # Unused
return []
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
    it computes the average precision and the best possible F1 score.
"""
def __init__(
self,
sentence_pairs: list[list[str]],
labels: list[int],
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert label == 0 or label == 1
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = [
"epoch",
"steps",
"Accuracy",
"Accuracy_Threshold",
"F1",
"F1_Threshold",
"Precision",
"Recall",
"Average_Precision",
]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(
self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar
)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(
pred_scores, self.labels, True
)
ap = average_precision_score(self.labels, pred_scores)
logger.info(f"Accuracy: {acc * 100:.2f}\t(Threshold: {acc_threshold:.4f})")
logger.info(f"F1: {f1 * 100:.2f}\t(Threshold: {f1_threshold:.4f})")
logger.info(f"Precision: {precision * 100:.2f}")
logger.info(f"Recall: {recall * 100:.2f}")
logger.info(f"Average Precision: {ap * 100:.2f}\n")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
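# Illustrative usage sketch (hypothetical data and model variable; an assumption
# rather than part of the original file):
#
#     evaluator = CEBinaryClassificationEvaluator(
#         sentence_pairs=[["A man is eating.", "A person eats."],
#                         ["A man is eating.", "The sky is blue."]],
#         labels=[1, 0],
#         name="dev")
#     ap = evaluator(cross_encoder_model)  # returns average precision in [0, 1]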
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
    it computes the average precision and the best possible F1 score.
"""
def __init__(
self,
sentence_pairs: list[list[str]],
labels: list[int],
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert label == 0 or label == 1
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = [
"epoch",
"steps",
"Accuracy",
"Accuracy_Threshold",
"F1",
"F1_Threshold",
"Precision",
"Recall",
"Average_Precision",
]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(
self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar
)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(
pred_scores, self.labels, True
)
ap = average_precision_score(self.labels, pred_scores)
logger.info("Accuracy: {:.2f}\t(Threshold: {:.4f})".format(acc * 100, acc_threshold))
logger.info("F1: {:.2f}\t(Threshold: {:.4f})".format(f1 * 100, f1_threshold))
logger.info("Precision: {:.2f}".format(precision * 100))
logger.info("Recall: {:.2f}".format(recall * 100))
logger.info("Average Precision: {:.2f}\n".format(ap * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
            If `True`, resume the download if an incompletely received file
            is found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
            extract the compressed file into a folder alongside the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
ignore_url_params: bool = False
download_desc: Optional[str] = None
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
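# Illustrative usage sketch (hypothetical values; an assumption rather than part
# of the original file): `copy()` deep-copies every field, so a per-download
# variant can be tweaked without mutating the shared configuration.
#
#     base = DownloadConfig(cache_dir="/tmp/hf_cache", max_retries=3)
#     per_call = base.copy()
#     per_call.force_download = True   # only this copy re-downloads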
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
            If `True`, resume the download if an incompletely received file
            is found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
            extract the compressed file into a folder alongside the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, will get token from `~/.huggingface`.
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
ignore_url_params: bool = False
download_desc: Optional[str] = None
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
    # Note: this transform assumes that the inputs to forward() are always PIL
    # images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImageTensor())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImageTensor()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ConvertImageDtype(torch.float)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
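# Illustrative usage sketch (hypothetical image/target variables and target
# dict keys; an assumption rather than part of the original file):
#
#     train_tf = DetectionPresetTrain(data_augmentation="hflip", backend="pil", use_v2=False)
#     eval_tf = DetectionPresetEval(backend="pil", use_v2=False)
#     img_t, target_t = train_tf(pil_image, {"boxes": boxes, "labels": labels})
#     img_e, target_e = eval_tf(pil_image, {"boxes": boxes, "labels": labels})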
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
    # Note: this transform assumes that the inputs to forward() are always PIL
    # images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImageTensor())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBox(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImageTensor()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ConvertImageDtype(torch.float)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
MEDIUM = "medium"
NOTION = "notion"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
MEDIUM = "medium"
NOTION = "notion"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|
def __getattr__(name: str = "") -> None:
"""Raise an error on import since is deprecated."""
msg = (
"This module has been moved to langchain-experimental. "
"For more details: https://github.com/langchain-ai/langchain/discussions/11352."
"To access this code, install it with `pip install langchain-experimental`."
"`from langchain_experimental.llm_bash.base "
"import LLMBashChain`"
)
raise AttributeError(msg)
|
def __getattr__(name: str = "") -> None:
"""Raise an error on import since is deprecated."""
raise AttributeError(
"This module has been moved to langchain-experimental. "
"For more details: https://github.com/langchain-ai/langchain/discussions/11352."
"To access this code, install it with `pip install langchain-experimental`."
"`from langchain_experimental.llm_bash.base "
"import LLMBashChain`"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, build_optimizer
from .default_constructor import DefaultOptimizerConstructor
__all__ = [
'OPTIMIZER_CONSTRUCTORS', 'OPTIMIZERS', 'DefaultOptimizerConstructor',
'build_optimizer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import (OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, build_optimizer,
build_optimizer_constructor)
from .default_constructor import DefaultOptimizerConstructor
__all__ = [
'OPTIMIZER_CONSTRUCTORS', 'OPTIMIZERS', 'DefaultOptimizerConstructor',
'build_optimizer', 'build_optimizer_constructor'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
import torch.nn as nn
from mmengine.runner import autocast
from mmengine.utils import TORCH_VERSION, digit_version
class TestAmp(unittest.TestCase):
def test_autocast(self):
if not torch.cuda.is_available():
if digit_version(TORCH_VERSION) < digit_version('1.10.0'):
                # `torch.cuda.amp.autocast` is only supported in GPU mode; if
                # CUDA is not available, it returns an empty context and
                # should not accept any arguments.
with self.assertRaisesRegex(RuntimeError,
'If pytorch versions is '):
with autocast():
pass
with autocast(enabled=False):
layer = nn.Conv2d(1, 1, 1)
res = layer(torch.randn(1, 1, 1, 1))
self.assertEqual(res.dtype, torch.float32)
else:
with autocast(device_type='cpu'):
# torch.autocast support cpu mode.
layer = nn.Conv2d(1, 1, 1)
res = layer(torch.randn(1, 1, 1, 1))
self.assertIn(res.dtype, (torch.bfloat16, torch.float16))
with autocast(enabled=False):
res = layer(torch.randn(1, 1, 1, 1))
self.assertEqual(res.dtype, torch.float32)
else:
if digit_version(TORCH_VERSION) < digit_version('1.10.0'):
devices = ['cuda']
else:
devices = ['cpu', 'cuda']
for device in devices:
with autocast(device_type=device):
# torch.autocast support cpu and cuda mode.
layer = nn.Conv2d(1, 1, 1).to(device)
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertIn(res.dtype, (torch.bfloat16, torch.float16))
with autocast(enabled=False, device_type=device):
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertEqual(res.dtype, torch.float32)
# Test with fp32_enabled
with autocast(enabled=False, device_type=device):
layer = nn.Conv2d(1, 1, 1).to(device)
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertEqual(res.dtype, torch.float32)
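
# --- Illustrative standalone sketch (not part of the original test) ---
# Shows the same behaviour with torch.autocast directly on CPU, assuming
# PyTorch >= 1.10 where CPU autocast defaults to bfloat16.
if __name__ == '__main__':
    layer = nn.Conv2d(1, 1, 1)
    with torch.autocast(device_type='cpu'):
        out = layer(torch.randn(1, 1, 1, 1))
    print(out.dtype)  # expected: torch.bfloat16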
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
import torch.nn as nn
from mmengine.runner import autocast
from mmengine.utils import TORCH_VERSION, digit_version
class TestAmp(unittest.TestCase):
def test_autocast(self):
if not torch.cuda.is_available():
if digit_version(TORCH_VERSION) < digit_version('1.10.0'):
                # `torch.cuda.amp.autocast` is only supported in GPU mode; if
                # CUDA is not available, it returns an empty context and
                # should not accept any arguments.
with self.assertRaisesRegex(RuntimeError,
'If pytorch versions is '):
with autocast():
pass
with autocast(enabled=False):
layer = nn.Conv2d(1, 1, 1)
res = layer(torch.randn(1, 1, 1, 1))
self.assertEqual(res.dtype, torch.float32)
else:
with autocast(device_type='cpu'):
# torch.autocast support cpu mode.
layer = nn.Conv2d(1, 1, 1)
res = layer(torch.randn(1, 1, 1, 1))
self.assertIn(res.dtype, (torch.bfloat16, torch.float16))
with autocast(enabled=False):
res = layer(torch.randn(1, 1, 1, 1))
self.assertEqual(res.dtype, torch.float32)
else:
if digit_version(TORCH_VERSION) < digit_version('1.10.0'):
devices = ['cuda']
else:
devices = ['cpu', 'cuda']
for device in devices:
with autocast():
# torch.autocast support cpu and cuda mode.
layer = nn.Conv2d(1, 1, 1).to(device)
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertIn(res.dtype, (torch.bfloat16, torch.float16))
with autocast(enabled=False):
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertEqual(res.dtype, torch.float32)
# Test with fp32_enabled
with autocast(enabled=False):
layer = nn.Conv2d(1, 1, 1).to(device)
res = layer(torch.randn(1, 1, 1, 1).to(device))
self.assertEqual(res.dtype, torch.float32)
|
"""CouchDB client."""
import json
import logging
from typing import Dict, List, Optional
import couchdb3
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SimpleCouchDBReader(BaseReader):
"""
Simple CouchDB reader.
    Concatenates each CouchDB doc into a Document used by LlamaIndex.
Args:
couchdb_url (str): CouchDB Full URL.
max_docs (int): Maximum number of documents to load.
"""
def __init__(
self,
user: str,
pwd: str,
host: str,
port: int,
couchdb_url: Optional[Dict] = None,
max_docs: int = 1000,
) -> None:
"""Initialize with parameters."""
if couchdb_url is not None:
self.client = couchdb3.Server(couchdb_url)
else:
self.client = couchdb3.Server(f"http://{user}:{pwd}@{host}:{port}")
self.max_docs = max_docs
def load_data(self, db_name: str, query: Optional[str] = None) -> List[Document]:
"""
Load data from the input directory.
Args:
db_name (str): name of the database.
query (Optional[str]): query to filter documents.
Defaults to None
Returns:
List[Document]: A list of documents.
"""
documents = []
db = self.client.get(db_name)
if query is None:
# if no query is specified, return all docs in database
logging.debug("showing all docs")
results = db.view("_all_docs", include_docs=True)
else:
logging.debug("executing query")
results = db.find(query)
if not isinstance(results, dict):
logging.debug(results.rows)
else:
logging.debug(results)
# check if more than one result
if (
not isinstance(results, dict)
and hasattr(results, "rows")
and results.rows is not None
):
for row in results.rows:
# check that the id field exists
if "id" not in row:
raise ValueError("`id` field not found in CouchDB document.")
documents.append(Document(text=json.dumps(row.doc)))
else:
# only one result
if results.get("docs") is not None:
for item in results.get("docs"):
# check that the _id field exists
if "_id" not in item:
raise ValueError("`_id` field not found in CouchDB document.")
documents.append(Document(text=json.dumps(item)))
return documents
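
# --- Illustrative usage sketch (not part of the original reader) ---
# Connection details and database name below are placeholders; running this
# requires a reachable CouchDB server.
if __name__ == "__main__":
    reader = SimpleCouchDBReader(user="admin", pwd="secret", host="localhost", port=5984)
    docs = reader.load_data("my_db")  # no query -> all docs via the _all_docs view
    print(len(docs))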
|
"""CouchDB client."""
import json
import logging
from typing import Dict, List, Optional
import couchdb3
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SimpleCouchDBReader(BaseReader):
"""Simple CouchDB reader.
    Concatenates each CouchDB doc into a Document used by LlamaIndex.
Args:
couchdb_url (str): CouchDB Full URL.
max_docs (int): Maximum number of documents to load.
"""
def __init__(
self,
user: str,
pwd: str,
host: str,
port: int,
couchdb_url: Optional[Dict] = None,
max_docs: int = 1000,
) -> None:
"""Initialize with parameters."""
if couchdb_url is not None:
self.client = couchdb3.Server(couchdb_url)
else:
self.client = couchdb3.Server(f"http://{user}:{pwd}@{host}:{port}")
self.max_docs = max_docs
def load_data(self, db_name: str, query: Optional[str] = None) -> List[Document]:
"""Load data from the input directory.
Args:
db_name (str): name of the database.
query (Optional[str]): query to filter documents.
Defaults to None
Returns:
List[Document]: A list of documents.
"""
documents = []
db = self.client.get(db_name)
if query is None:
# if no query is specified, return all docs in database
logging.debug("showing all docs")
results = db.view("_all_docs", include_docs=True)
else:
logging.debug("executing query")
results = db.find(query)
if not isinstance(results, dict):
logging.debug(results.rows)
else:
logging.debug(results)
# check if more than one result
if (
not isinstance(results, dict)
and hasattr(results, "rows")
and results.rows is not None
):
for row in results.rows:
# check that the id field exists
if "id" not in row:
raise ValueError("`id` field not found in CouchDB document.")
documents.append(Document(text=json.dumps(row.doc)))
else:
# only one result
if results.get("docs") is not None:
for item in results.get("docs"):
# check that the _id field exists
if "_id" not in item:
raise ValueError("`_id` field not found in CouchDB document.")
documents.append(Document(text=json.dumps(item)))
return documents
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adadelta"])
class Adadelta(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based
on adaptive learning rate per dimension to address two drawbacks:
- The continual decay of learning rates throughout training.
- The need for a manually selected global learning rate.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, the initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adadelta`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use 1.0.
rho: A floating point value. The decay rate. Defaults to `0.95`.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Zeiler, 2012](http://arxiv.org/abs/1212.5701)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adadelta",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.rho = rho
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._accumulated_grads = self.add_optimizer_variables(
var_list, "accumulated_grad"
)
self._accumulated_delta_vars = self.add_optimizer_variables(
var_list, "accumulated_delta_var"
)
def update_step(self, grad, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
grad = ops.cast(grad, variable.dtype)
rho = self.rho
accumulated_grad = self._accumulated_grads[
self._get_variable_index(variable)
]
accumulated_delta_var = self._accumulated_delta_vars[
self._get_variable_index(variable)
]
def rms(x):
return ops.sqrt(ops.add(x, self.epsilon))
self.assign(
accumulated_grad,
ops.add(
rho * accumulated_grad, ops.multiply(1 - rho, ops.square(grad))
),
)
delta_var = ops.negative(
ops.divide(
ops.multiply(rms(accumulated_delta_var), grad),
rms(accumulated_grad),
)
)
self.assign(
accumulated_delta_var,
ops.add(
ops.multiply(rho, accumulated_delta_var),
ops.multiply(1 - rho, ops.square(delta_var)),
),
)
self.assign_add(variable, ops.multiply(lr, delta_var))
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"epsilon": self.epsilon,
}
)
return config
Adadelta.__doc__ = Adadelta.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
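
# --- Illustrative usage sketch (not part of the original optimizer code) ---
# A tiny made-up model fitted with Adadelta; per the docstring above,
# learning_rate=1.0 matches the original paper.
if __name__ == "__main__":
    import numpy as np
    import keras

    toy_model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
    toy_model.compile(
        optimizer=keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95), loss="mse"
    )
    x = np.random.rand(32, 4).astype("float32")
    y = np.random.rand(32, 1).astype("float32")
    toy_model.fit(x, y, epochs=1, verbose=0)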
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adadelta"])
class Adadelta(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based
on adaptive learning rate per dimension to address two drawbacks:
- The continual decay of learning rates throughout training.
- The need for a manually selected global learning rate.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, the initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`. Note that `Adadelta`
tends to benefit from higher initial learning rate values compared
to other optimizers. To match the exact form in the original paper,
use 1.0.
rho: A floating point value. The decay rate. Defaults to `0.95`.
epsilon: Small floating point value for maintaining numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Zeiler, 2012](http://arxiv.org/abs/1212.5701)
"""
def __init__(
self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adadelta",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
name=name,
**kwargs,
)
self.rho = rho
self.epsilon = epsilon
def build(self, var_list):
if self.built:
return
super().build(var_list)
self._accumulated_grads = []
self._accumulated_delta_vars = []
for var in var_list:
self._accumulated_grads.append(
self.add_variable_from_reference(var, "accumulated_grad")
)
self._accumulated_delta_vars.append(
self.add_variable_from_reference(var, "accumulated_delta_var")
)
def update_step(self, grad, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
grad = ops.cast(grad, variable.dtype)
rho = self.rho
accumulated_grad = self._accumulated_grads[
self._get_variable_index(variable)
]
accumulated_delta_var = self._accumulated_delta_vars[
self._get_variable_index(variable)
]
def rms(x):
return ops.sqrt(ops.add(x, self.epsilon))
self.assign(
accumulated_grad,
ops.add(
rho * accumulated_grad, ops.multiply(1 - rho, ops.square(grad))
),
)
delta_var = ops.negative(
ops.divide(
ops.multiply(rms(accumulated_delta_var), grad),
rms(accumulated_grad),
)
)
self.assign(
accumulated_delta_var,
ops.add(
ops.multiply(rho, accumulated_delta_var),
ops.multiply(1 - rho, ops.square(delta_var)),
),
)
self.assign_add(variable, ops.multiply(lr, delta_var))
def get_config(self):
config = super().get_config()
config.update(
{
"rho": self.rho,
"epsilon": self.epsilon,
}
)
return config
Adadelta.__doc__ = Adadelta.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS, though it seems to have no effect; export the variable manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(
f'multiprocessing start method is set to `fork`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `fork`: {e!r}'
)
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.18.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running
    matplotlib/seaborn with many plot generators in parallel, vs. the Ubuntu
    default ulimit -n 1024 or the OS X El Capitan default of 256; the
    temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
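# Illustrative check (not part of the original module): on POSIX systems the
# soft nofile limit should now be at least 4096 whenever the hard limit
# allows it, e.g.
#   import resource
#   soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)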
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS, though it seems to have no effect; export the variable manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(
f'multiprocessing start method is set to `fork`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `fork`: {e!r}'
)
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.18.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running
    matplotlib/seaborn with many plot generators in parallel, vs. the Ubuntu
    default ulimit -n 1024 or the OS X El Capitan default of 256; the
    temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import ConcatDataset, MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .v3det import V3DetDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset', 'V3DetDataset', 'ConcatDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset'
]
|
from typing import TYPE_CHECKING, Tuple, TypeVar
import numpy as np
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T', bound='Mesh3DUrl')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
    Can be a remote (web) URL or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(mesh_url=str(self))
def load(self: T) -> Tuple[np.ndarray, np.ndarray]:
"""
Load the data from the url into a tuple of two numpy.ndarrays containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
from docarray.typing import Mesh3DUrl
class MyDoc(BaseDocument):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
vertices, faces = doc.mesh_url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
:return: tuple of two np.ndarrays representing the mesh's vertices and faces
"""
mesh = self._load_trimesh_instance(force='mesh')
vertices = mesh.vertices.view(np.ndarray)
faces = mesh.faces.view(np.ndarray)
return vertices, faces
|
from typing import TYPE_CHECKING, Tuple, TypeVar
import numpy as np
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T', bound='Mesh3DUrl')
class Mesh3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh information.
    Can be a remote (web) URL or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(mesh_url=str(self))
def load(self: T) -> Tuple[np.ndarray, np.ndarray]:
"""
Load the data from the url into a tuple of two numpy.ndarrays containing
vertices and faces information.
EXAMPLE USAGE
.. code-block:: python
from docarray import Document
import numpy as np
from docarray.typing import Mesh3DUrl
class MyDoc(Document):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="toydata/tetrahedron.obj")
vertices, faces = doc.mesh_url.load()
assert isinstance(vertices, np.ndarray)
assert isinstance(faces, np.ndarray)
:return: tuple of two np.ndarrays representing the mesh's vertices and faces
"""
mesh = self._load_trimesh_instance(force='mesh')
vertices = mesh.vertices.view(np.ndarray)
faces = mesh.faces.view(np.ndarray)
return vertices, faces
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.5.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.4.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
_base_ = 'faster-rcnn_r50-caffe_fpn_1x_coco.py'
max_iter = 90000
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=10000)
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)
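# Illustrative note (not part of the original config): assuming a base lr of
# 0.02 from the _base_ config, the schedule above warms the lr up linearly
# from 0.02 * 0.001 to 0.02 over the first 500 iterations, then multiplies it
# by gamma=0.1 at iterations 60000 and 80000, ending at 0.0002.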
|
_base_ = 'faster-rcnn_r50-caffe_fpn_1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
|
from pathlib import Path
import pytest
import numpy as np
import paddlehub as hub
from jina import Document, DocumentArray, Executor
from ...text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
@pytest.fixture(scope='function')
def parameters():
return {'traverse_paths': ['r'], 'batch_size': 10}
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_text_paddle(model, document_array, content, parameters):
ex = TextPaddleEncoder()
assert ex.on_gpu is False
ex.encode(document_array, parameters)
for doc in document_array:
assert isinstance(doc.embedding, np.ndarray)
assert doc.embedding.shape == (1024,)
embeds = model.get_embedding([[content]])
pooled_features = []
for embed in embeds:
pooled_feature, _ = embed
pooled_features.append(pooled_feature)
assert (pooled_features == document_array[0].embedding).all()
|
import pytest
import numpy as np
import paddlehub as hub
from jina.executors import BaseExecutor
from jina import Document, DocumentArray
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
@pytest.fixture(scope='function')
def parameters(content):
return {'traverse_paths': ['r'], 'batch_size': 10}
def test_text_paddle(model, document_array, content, parameters):
ex = BaseExecutor.load_config('../../config.yml')
assert ex.on_gpu is False
ex.encode(document_array, parameters)
for doc in document_array:
assert isinstance(doc.embedding, np.ndarray)
assert doc.embedding.shape == (1024,)
embeds = model.get_embedding([[content]])
pooled_features = []
for embed in embeds:
pooled_feature, _ = embed
pooled_features.append(pooled_feature)
assert (pooled_features == document_array[0].embedding).all()
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES)),
((3,), (3, NUM_CLASSES)),
((4, 3), (4, 3, NUM_CLASSES)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
((3, 1), (3, NUM_CLASSES)),
((3, 2, 1), (3, 2, NUM_CLASSES)),
]
)
def test_to_categorical(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
# Check shape
self.assertEqual(one_hot.shape, expected_shape)
# Make sure there is only one 1 in a row
self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
# Get original labels back from one hots
self.assertTrue(
np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
)
def test_to_categorical_without_num_classes(self):
label = [0, 2, 5]
one_hot = numerical_utils.to_categorical(label)
self.assertEqual(one_hot.shape, (3, 5 + 1))
def test_to_categorical_with_backend_tensor(self):
label = backend.convert_to_tensor(np.array([0, 2, 1, 3, 4]))
expected = backend.convert_to_tensor(
np.array(
[
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
)
)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
assert backend.is_tensor(one_hot)
self.assertAllClose(one_hot, expected)
@parameterized.parameters([1, 2, 3])
def test_normalize(self, order):
xb = backend.random.uniform((3, 3), seed=1337)
xnp = backend.convert_to_numpy(xb)
# Expected result
l2 = np.atleast_1d(np.linalg.norm(xnp, order, axis=-1))
l2[l2 == 0] = 1
expected = xnp / np.expand_dims(l2, axis=-1)
# Test NumPy
out = numerical_utils.normalize(xnp, axis=-1, order=order)
self.assertIsInstance(out, np.ndarray)
self.assertAllClose(out, expected)
# Test backend
out = numerical_utils.normalize(xb, axis=-1, order=order)
self.assertTrue(backend.is_tensor(out))
self.assertAllClose(backend.convert_to_numpy(out), expected)
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES)),
((3,), (3, NUM_CLASSES)),
((4, 3), (4, 3, NUM_CLASSES)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
((3, 1), (3, NUM_CLASSES)),
((3, 2, 1), (3, 2, NUM_CLASSES)),
]
)
def test_to_categorical(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
# Check shape
self.assertEqual(one_hot.shape, expected_shape)
# Make sure there is only one 1 in a row
self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
# Get original labels back from one hots
self.assertTrue(
np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
)
def test_to_categorical_without_num_classes(self):
label = [0, 2, 5]
one_hot = numerical_utils.to_categorical(label)
self.assertEqual(one_hot.shape, (3, 5 + 1))
def test_to_categorical_with_backend_tensor(self):
label = backend.convert_to_tensor(np.array([0, 2, 1, 3, 4]))
expected = backend.convert_to_tensor(
np.array(
[
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
)
)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
assert backend.is_tensor(one_hot)
self.assertAllClose(one_hot, expected)
@parameterized.parameters([1, 2, 3])
def test_normalize(self, order):
xb = backend.random.uniform((3, 3), seed=1337)
xnp = backend.convert_to_numpy(xb)
# Expected result
l2 = np.atleast_1d(np.linalg.norm(xnp, order, axis=-1))
l2[l2 == 0] = 1
expected = xnp / np.expand_dims(l2, axis=-1)
# Test NumPy
out = numerical_utils.normalize(xnp, axis=-1, order=order)
self.assertIsInstance(out, np.ndarray)
self.assertAllClose(out, expected)
# Test backend
out = numerical_utils.normalize(xb, axis=-1, order=order)
self.assertTrue(backend.is_tensor(out))
self.assertAllClose(backend.convert_to_numpy(out), expected)
|
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
_: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_kwargs: Any
) -> Union[BaseModel, dict]:
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
params = cast("dict", schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
FakeStructuredChatModel.model_rebuild()
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
assert prompt.messages[0].prompt.template_format == "mustache" # type: ignore[union-attr, union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
input: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_: Any
) -> Union[BaseModel, dict]:
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
params = cast("dict", schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
FakeStructuredChatModel.model_rebuild()
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
assert prompt.messages[0].prompt.template_format == "mustache" # type: ignore[union-attr, union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.10.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.10.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List
import torch
import torch.nn as nn
from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optimizer(model: nn.Module, cfg: dict) -> torch.optim.Optimizer:
"""Build function of optimizer.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer constructor, and use optimizer constructor to build the
optimizer. If ``constructor`` is not set, the
``DefaultOptimizerConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer and optimizer constructor.
Returns:
torch.optim.Optimizer: The built optimizer.
"""
optimizer_cfg = copy.deepcopy(cfg)
constructor_type = optimizer_cfg.pop('constructor',
'DefaultOptimizerConstructor')
paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
optim_constructor = OPTIMIZER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg))
optimizer = optim_constructor(model)
return optimizer
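
# --- Illustrative usage sketch (not part of the original module) ---
# The toy model and optimizer config are made up; 'SGD' is available because
# register_torch_optimizers() registers everything in torch.optim.
if __name__ == '__main__':
    toy_model = nn.Linear(2, 2)
    toy_cfg = dict(type='SGD', lr=0.01, momentum=0.9)
    optim = build_optimizer(toy_model, toy_cfg)
    print(type(optim).__name__)  # expected: SGD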
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Optional
import torch
import torch.nn as nn
from mmengine.registry import OPTIMIZER_CONSTRUCTORS, OPTIMIZERS
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optimizer(
model: nn.Module,
cfg: dict,
default_scope: Optional[str] = None) -> torch.optim.Optimizer:
"""Build function of optimizer.
If ``constructor`` is set in the ``cfg``, this method will build an
    optimizer constructor, and use the optimizer constructor to build the
optimizer. If ``constructor`` is not set, the
``DefaultOptimizerConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer and optimizer constructor.
default_scope (str, optional): The ``default_scope`` is used to
reset the current registry. Defaults to None.
Returns:
torch.optim.Optimizer: The built optimizer.
"""
optimizer_cfg = copy.deepcopy(cfg)
constructor_type = optimizer_cfg.pop('constructor',
'DefaultOptimizerConstructor')
paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
optim_constructor = OPTIMIZER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optimizer_cfg=optimizer_cfg,
paramwise_cfg=paramwise_cfg),
default_scope=default_scope)
optimizer = optim_constructor(model, default_scope=default_scope)
return optimizer
|
from __future__ import annotations
import collections
import json
import os
import string
from collections.abc import Iterable
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast whitespace tokenizer. Splits a sentence based on whitespace.
    Punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> list[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json")) as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
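# A minimal usage sketch (not part of the original file): builds a tiny vocabulary and
# shows that stop words are dropped and punctuation is stripped before lookup; the
# expected output assumes "the" is in ENGLISH_STOP_WORDS.
if __name__ == "__main__":
    tokenizer = WhitespaceTokenizer(vocab=["hello", "world"], do_lower_case=True)
    print(tokenizer.tokenize("Hello, the world!"))  # expected: [0, 1]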
|
from __future__ import annotations
import collections
import json
import os
import string
from typing import Iterable
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
    Simple and fast whitespace tokenizer. Splits a sentence based on whitespace.
    Punctuation is stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> list[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json")) as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
|
import pytest
import os
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. it also returns the value of the NVIDIA_API_KEY environment variable
# before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
# Save the current value of the environment variable, if it exists
val = os.environ.get(var, None)
# Remove the environment variable to simulate it being masked during the test
if val is not None:
del os.environ[var]
try:
# Yield the original value so it can be used in the test
yield val
finally:
# Restore the original environment variable if it was set
if val is not None:
os.environ[var] = val
else:
# If the variable was not originally set, ensure it's removed
if var in os.environ:
del os.environ[var]
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable or --nim-endpoint option"
)
for item in items:
if "integration" in item.keywords and not config.getoption(
"--nim-endpoint"
):
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def get_mode(config: pytest.Config) -> dict:
nim_endpoint = config.getoption("--nim-endpoint")
if nim_endpoint:
return {"base_url": nim_endpoint}
return {}
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
from llama_index.llms.nvidia import NVIDIA
from llama_index.llms.nvidia.base import DEFAULT_MODEL
mode = get_mode(metafunc.config)
if "chat_model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in NVIDIA(**mode).available_models]
metafunc.parametrize("chat_model", models, ids=models)
@pytest.fixture()
def mode(request: pytest.FixtureRequest) -> dict:
return get_mode(request.config)
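# Hypothetical illustration (not part of the original conftest): an integration test
# consuming these fixtures would normally live in a separate test module and look
# roughly like this. NVIDIA and its complete() method come from llama_index.llms.nvidia;
# the "integration" marker matches the one checked in pytest_collection_modifyitems.
@pytest.mark.integration()
def test_basic_completion_sketch(chat_model: str, mode: dict) -> None:
    from llama_index.llms.nvidia import NVIDIA

    llm = NVIDIA(model=chat_model, **mode)
    assert llm.complete("Hello").text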
|
import pytest
import os
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. it also returns the value of the NVIDIA_API_KEY environment variable
# before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield val
finally:
if val:
os.environ[var] = val
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable or --nim-endpoint option"
)
for item in items:
if "integration" in item.keywords and not config.getoption(
"--nim-endpoint"
):
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def get_mode(config: pytest.Config) -> dict:
nim_endpoint = config.getoption("--nim-endpoint")
if nim_endpoint:
return {"base_url": nim_endpoint}
return {}
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
from llama_index.llms.nvidia import NVIDIA
from llama_index.llms.nvidia.base import DEFAULT_MODEL
mode = get_mode(metafunc.config)
if "chat_model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in NVIDIA(**mode).available_models]
metafunc.parametrize("chat_model", models, ids=models)
@pytest.fixture()
def mode(request: pytest.FixtureRequest) -> dict:
return get_mode(request.config)
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class HubSpotCompanyBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
company_data: dict = SchemaField(
description="Company data for create/update operations",
default_factory=dict,
)
domain: str = SchemaField(
description="Company domain for get/update operations", default=""
)
class Output(BlockSchema):
company: dict = SchemaField(description="Company information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="3ae02219-d540-47cd-9c78-3ad6c7d9820a",
description="Manages HubSpot companies - create, update, and retrieve company information",
categories={BlockCategory.CRM},
input_schema=HubSpotCompanyBlock.Input,
output_schema=HubSpotCompanyBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/companies"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = Requests().post(
base_url, headers=headers, json={"properties": input_data.company_data}
)
result = response.json()
yield "company", result
yield "status", "created"
elif input_data.operation == "get":
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
}
response = Requests().post(search_url, headers=headers, json=search_data)
result = response.json()
yield "company", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
# First get company ID by domain
search_response = Requests().post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
},
)
company_id = search_response.json().get("results", [{}])[0].get("id")
if company_id:
response = Requests().patch(
f"{base_url}/{company_id}",
headers=headers,
json={"properties": input_data.company_data},
)
result = response.json()
yield "company", result
yield "status", "updated"
else:
yield "company", {}
yield "status", "company_not_found"
|
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotCompanyBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (create, update, get)", default="get"
)
company_data: dict = SchemaField(
description="Company data for create/update operations",
default_factory=dict,
)
domain: str = SchemaField(
description="Company domain for get/update operations", default=""
)
class Output(BlockSchema):
company: dict = SchemaField(description="Company information")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="3ae02219-d540-47cd-9c78-3ad6c7d9820a",
description="Manages HubSpot companies - create, update, and retrieve company information",
categories={BlockCategory.CRM},
input_schema=HubSpotCompanyBlock.Input,
output_schema=HubSpotCompanyBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com/crm/v3/objects/companies"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "create":
response = requests.post(
base_url, headers=headers, json={"properties": input_data.company_data}
)
result = response.json()
yield "company", result
yield "status", "created"
elif input_data.operation == "get":
search_url = f"{base_url}/search"
search_data = {
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
}
response = requests.post(search_url, headers=headers, json=search_data)
result = response.json()
yield "company", result.get("results", [{}])[0]
yield "status", "retrieved"
elif input_data.operation == "update":
# First get company ID by domain
search_response = requests.post(
f"{base_url}/search",
headers=headers,
json={
"filterGroups": [
{
"filters": [
{
"propertyName": "domain",
"operator": "EQ",
"value": input_data.domain,
}
]
}
]
},
)
company_id = search_response.json().get("results", [{}])[0].get("id")
if company_id:
response = requests.patch(
f"{base_url}/{company_id}",
headers=headers,
json={"properties": input_data.company_data},
)
result = response.json()
yield "company", result
yield "status", "updated"
else:
yield "company", {}
yield "status", "company_not_found"
|
"""Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the response from an "old-fashioned" LLM that
generates regular text (not chat messages).
    This model is used internally by chat models and will eventually
be mapped to a more general `LLMResult` object, and then projected into
an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
    via callbacks). Please refer to the `AIMessage` and `LLMResult` schema documentation
for more information.
"""
text: str
"""Generated text output."""
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"
"""Type is used exclusively for serialization purposes.
Set to "Generation" for this class."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default namespace is ["langchain", "schema", "output"].
"""
return ["langchain", "schema", "output"]
class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""
def __add__(self, other: GenerationChunk) -> GenerationChunk:
"""Concatenate two GenerationChunks."""
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return GenerationChunk(
text=self.text + other.text,
generation_info=generation_info or None,
)
else:
msg = (
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
raise TypeError(msg)
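# Minimal sketch (not part of the original module): streaming callers typically fold
# chunks together with `+`, which concatenates text and merges generation_info as
# implemented above; the dict keys below are illustrative only.
if __name__ == "__main__":
    chunk = GenerationChunk(text="Hello", generation_info={"model": "demo"})
    chunk += GenerationChunk(text=", world", generation_info={"finish_reason": "stop"})
    print(chunk.text)             # expected: Hello, world
    print(chunk.generation_info)  # expected: {'model': 'demo', 'finish_reason': 'stop'}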
|
from __future__ import annotations
from typing import Any, Literal, Optional
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the response from an "old-fashioned" LLM that
generates regular text (not chat messages).
    This model is used internally by chat models and will eventually
be mapped to a more general `LLMResult` object, and then projected into
an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
    via callbacks). Please refer to the `AIMessage` and `LLMResult` schema documentation
for more information.
"""
text: str
"""Generated text output."""
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"
"""Type is used exclusively for serialization purposes.
Set to "Generation" for this class."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "output"]
class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "output"]
def __add__(self, other: GenerationChunk) -> GenerationChunk:
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return GenerationChunk(
text=self.text + other.text,
generation_info=generation_info or None,
)
else:
msg = (
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
raise TypeError(msg)
|
"""Chat Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
ChatMessage.model_rebuild()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
elif isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
else:
return super().__add__(other)
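# Minimal sketch (not part of the original module): concatenating two chunks with the
# same role merges their content; mixing different roles raises ValueError, as
# implemented above.
if __name__ == "__main__":
    first = ChatMessageChunk(role="assistant", content="Hello")
    second = ChatMessageChunk(role="assistant", content=", world")
    print((first + second).content)  # expected: Hello, world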
|
from typing import Any, Literal
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization). Defaults to "chat"."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "messages"].
"""
return ["langchain", "schema", "messages"]
ChatMessage.model_rebuild()
class ChatMessageChunk(ChatMessage, BaseMessageChunk):
"""Chat Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
"""The type of the message (used during serialization).
Defaults to "ChatMessageChunk"."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default is ["langchain", "schema", "messages"].
"""
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
if isinstance(other, ChatMessageChunk):
if self.role != other.role:
msg = "Cannot concatenate ChatMessageChunks with different roles."
raise ValueError(msg)
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
elif isinstance(other, BaseMessageChunk):
return self.__class__(
role=self.role,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
else:
return super().__add__(other)
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
        The async method to set up the runtime.
        Sets up the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
args=self.args,
logger=self.logger,
timeout_send=self.timeout_send,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of the server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.http.app import get_fastapi_app
__all__ = ['HTTPGatewayRuntime']
class HTTPGatewayRuntime(GatewayRuntime):
"""Runtime for HTTP interface."""
async def async_setup(self):
"""
        The async method to set up the runtime.
        Sets up the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._set_topology_graph()
self._set_connection_pool()
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
self.args,
topology_graph=self._topology_graph,
connection_pool=self._connection_pool,
logger=self.logger,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of the server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
await self._connection_pool.close()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
import gc
import asyncio
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
)
from typing import Any
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.llms.mock import MockLLM
import pytest
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.chat_engine.simple import SimpleChatEngine
def test_simple_chat_engine() -> None:
engine = SimpleChatEngine.from_defaults()
engine.reset()
response = engine.chat("Test message 1")
assert str(response) == "user: Test message 1\nassistant: "
response = engine.chat("Test message 2")
assert (
str(response)
== "user: Test message 1\nassistant: user: Test message 1\nassistant: \n"
"user: Test message 2\nassistant: "
)
engine.reset()
response = engine.chat("Test message 3")
assert str(response) == "user: Test message 3\nassistant: "
def test_simple_chat_engine_with_init_history() -> None:
engine = SimpleChatEngine.from_defaults(
chat_history=[
ChatMessage(role=MessageRole.USER, content="test human message"),
ChatMessage(role=MessageRole.ASSISTANT, content="test ai message"),
],
)
response = engine.chat("new human message")
assert (
str(response) == "user: test human message\nassistant: test ai message\n"
"user: new human message\nassistant: "
)
@pytest.mark.asyncio()
async def test_simple_chat_engine_astream():
engine = SimpleChatEngine.from_defaults()
response = await engine.astream_chat("Hello World!")
num_iters = 0
async for response_part in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert "Hello World!" in response.unformatted_response
assert len(engine.chat_history) == 2
response = await engine.astream_chat("What is the capital of the moon?")
num_iters = 0
async for _ in response.async_response_gen():
num_iters += 1
assert num_iters > 10
assert "Hello World!" in response.unformatted_response
assert "What is the capital of the moon?" in response.unformatted_response
def test_simple_chat_engine_astream_exception_handling():
"""Test that an exception thrown while retrieving the streamed LLM response gets bubbled up to the user.
    Also tests that the non-retrieved exception does not remain in a task that was not awaited, which would lead to
a 'Task exception was never retrieved' message during garbage collection.
"""
class ExceptionThrownInTest(Exception):
pass
class ExceptionMockLLM(MockLLM):
"""Raises an exception while streaming back the mocked LLM response."""
@classmethod
def class_name(cls) -> str:
return "ExceptionMockLLM"
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
def gen_prompt() -> CompletionResponseGen:
for ch in prompt:
yield CompletionResponse(
text=prompt,
delta=ch,
)
raise ExceptionThrownInTest("Exception thrown for testing purposes")
return gen_prompt()
async def async_part():
engine = SimpleChatEngine.from_defaults(
llm=ExceptionMockLLM(), memory=ChatMemoryBuffer.from_defaults()
)
response = await engine.astream_chat("Hello World!")
with pytest.raises(ExceptionThrownInTest):
async for response_part in response.async_response_gen():
pass
not_retrieved_exception = False
def custom_exception_handler(loop, context):
if context.get("message") == "Task exception was never retrieved":
nonlocal not_retrieved_exception
not_retrieved_exception = True
loop = asyncio.new_event_loop()
loop.set_exception_handler(custom_exception_handler)
result = loop.run_until_complete(async_part())
loop.close()
gc.collect()
if not_retrieved_exception:
pytest.fail(
"Exception was not correctly handled - ended up in asyncio cleanup performed during garbage collection"
)
|
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.chat_engine.simple import SimpleChatEngine
def test_simple_chat_engine() -> None:
engine = SimpleChatEngine.from_defaults()
engine.reset()
response = engine.chat("Test message 1")
assert str(response) == "user: Test message 1\nassistant: "
response = engine.chat("Test message 2")
assert (
str(response)
== "user: Test message 1\nassistant: user: Test message 1\nassistant: \n"
"user: Test message 2\nassistant: "
)
engine.reset()
response = engine.chat("Test message 3")
assert str(response) == "user: Test message 3\nassistant: "
def test_simple_chat_engine_with_init_history() -> None:
engine = SimpleChatEngine.from_defaults(
chat_history=[
ChatMessage(role=MessageRole.USER, content="test human message"),
ChatMessage(role=MessageRole.ASSISTANT, content="test ai message"),
],
)
response = engine.chat("new human message")
assert (
str(response) == "user: test human message\nassistant: test ai message\n"
"user: new human message\nassistant: "
)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005),
clip_grad=dict(max_norm=35, norm_type=2))
# learning policy
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
img_norm_cfg = dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
import os
import sysconfig
from typing import Optional
from torch.utils._triton import has_triton
def enable_triton(lib_dir: Optional[str] = None) -> dict[str, str]:
"""
    Enable NVSHMEM device functions for Triton. It performs an NVSHMEM
device-side initialization on the kernel module created by Triton.
Args:
lib_dir (Optional[str]): The directory where the NVSHMEM device library
is located. If not provided, it will use the default path where NVSHMEM
wheel is installed.
Returns:
dict[str, str]: A dictionary containing the NVSHMEM device library name
and path.
"""
from triton.runtime.jit import JITFunction
from torch._C._distributed_c10d import _nvshmemx_cumodule_init
# Detect NVSHMEM device library path from python library path
if lib_dir is None:
py_lib_path = sysconfig.get_path("purelib")
lib_dir = py_lib_path + "/nvidia/nvshmem/lib"
lib_path = os.path.join(lib_dir, "libnvshmem_device.bc")
if not os.path.exists(lib_path):
raise RuntimeError("NVSHMEM device library not found")
extern_libs = {"libnvshmem_device": lib_path}
# A hook function to initialize NVSHMEM in Triton
def nvshmem_init_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
key = kwargs["key"]
device = kwargs["compile"]["device"]
jit_function = kwargs["fn"].jit_function
kernel_cache, _, _, _ = jit_function.device_caches[device]
kernel = kernel_cache.get(key, None)
kernel.run
_nvshmemx_cumodule_init(kernel.module)
# Register the function as a post-compile hook
JITFunction.compiled_hook = nvshmem_init_hook
# Return to user so that they can use it in Triton kernel invocation
return extern_libs
if has_triton():
from triton.language import core
@core.extern
def putmem_block(dst, src, nelems, pe, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_putmem_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def getmem_block(dst, src, nelems, pe, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_getmem_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def putmem_signal_block( # type: ignore[no-untyped-def]
dst,
src,
nelems,
sig_addr,
signal,
sig_op,
pe,
_builder=None,
): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, sig_addr, signal, sig_op, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_putmem_signal_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def wait_until(ivar, cmp, cmp_val, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[ivar, cmp, cmp_val],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmem_longlong_wait_until", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
|
import os
import sysconfig
from typing import Optional
from torch.utils._triton import has_triton
def enable_triton(lib_dir: Optional[str] = None) -> dict[str, str]:
"""
    Enable NVSHMEM device functions for Triton. It performs an NVSHMEM
device-side initialization on the kernel module created by Triton.
Args:
lib_dir (Optional[str]): The directory where the NVSHMEM device library
is located. If not provided, it will use the default path where NVSHMEM
wheel is installed.
Returns:
dict[str, str]: A dictionary containing the NVSHMEM device library name
and path.
"""
from triton.runtime.jit import JITFunction
from torch._C._distributed_c10d import _nvshmemx_cumodule_init
# Detect NVSHMEM device library path from python library path
if lib_dir is None:
py_lib_path = sysconfig.get_path("purelib")
lib_dir = py_lib_path + "/nvidia/nvshmem/lib"
lib_path = os.path.join(lib_dir, "libnvshmem_device.bc")
if not os.path.exists(lib_path):
raise RuntimeError("NVSHMEM device library not found")
extern_libs = {"libnvshmem_device": lib_path}
# A hook function to initialize NVSHMEM in Triton
def nvshmem_init_hook(*args, **kwargs) -> None: # type: ignore[no-untyped-def]
key = kwargs["key"]
device = kwargs["compile"]["device"]
jit_function = kwargs["fn"].jit_function
kernel_cache, _, _, _ = jit_function.device_caches[device]
kernel = kernel_cache.get(key, None)
kernel.run
_nvshmemx_cumodule_init(kernel.module)
# Register the function as a post-compile hook
JITFunction.compiled_hook = nvshmem_init_hook
# Return to user so that they can use it in Triton kernel invocation
return extern_libs
if has_triton():
from triton.language import core
@core.extern
def putmem_block(dst, src, nelems, pe, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_putmem_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def getmem_block(dst, src, nelems, pe, _builder=None): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_getmem_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
@core.extern
def putmem_signal_block( # type: ignore[no-untyped-def]
dst,
src,
nelems,
sig_addr,
signal,
sig_op,
pe,
_builder=None,
): # type: ignore[no-untyped-def]
return core.extern_elementwise(
"",
"",
[dst, src, nelems, sig_addr, signal, sig_op, pe],
{
(
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
core.dtype("int64"),
): ("nvshmemx_putmem_signal_block", core.dtype("int32"))
},
is_pure=False,
_builder=_builder,
)
|
_base_ = './cascade-rcnn_r50_fpn_20e_coco.py'
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
    In distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.train_loop.dataloader, 'sampler') and hasattr(
runner.train_loop.dataloader.sampler, 'set_epoch'):
            # In case the `_SingleProcessDataLoaderIter` has no sampler,
            # or the data loader uses `SequentialSampler` in PyTorch.
runner.train_loop.dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.train_loop.dataloader,
'batch_sampler') and hasattr(
runner.train_loop.dataloader.batch_sampler.sampler,
'set_epoch'):
            # In case the `_SingleProcessDataLoaderIter` has no batch sampler.
            # The batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.train_loop.dataloader.batch_sampler.sampler.set_epoch(
runner.epoch)
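# Hedged configuration sketch (not part of the original file): with MMEngine's Runner,
# this hook is usually enabled through the default hooks of a config; the `sampler_seed`
# key follows the common convention but is an assumption here, not taken from this file.
example_default_hooks = dict(sampler_seed=dict(type='DistSamplerSeedHook'))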
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
"""Data-loading sampler for distributed training.
    In distributed training, it is only useful in conjunction with
:obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
purpose with :obj:`IterLoader`.
"""
priority = 'NORMAL'
def before_train_epoch(self, runner, mode: str = 'train') -> None:
"""Set the seed for sampler and batch_sampler.
Args:
runner (Runner): The runner of the training process.
"""
if hasattr(runner.train_loop.dataloader.sampler, 'set_epoch'):
            # in case the data loader uses `SequentialSampler` in PyTorch
runner.train_loop.dataloader.sampler.set_epoch(runner.epoch)
elif hasattr(runner.train_loop.dataloader.batch_sampler.sampler,
'set_epoch'):
            # The batch sampler in PyTorch wraps the sampler as one of its attributes.
runner.train_loop.dataloader.batch_sampler.sampler.set_epoch(
runner.epoch)
|
from typing import Dict, Set
from fastapi import WebSocket
from backend.data.execution import (
ExecutionEventType,
GraphExecutionEvent,
NodeExecutionEvent,
)
from backend.server.model import WSMessage, WSMethod
_EVENT_TYPE_TO_METHOD_MAP: dict[ExecutionEventType, WSMethod] = {
ExecutionEventType.GRAPH_EXEC_UPDATE: WSMethod.GRAPH_EXECUTION_EVENT,
ExecutionEventType.NODE_EXEC_UPDATE: WSMethod.NODE_EXECUTION_EVENT,
}
class ConnectionManager:
def __init__(self):
self.active_connections: Set[WebSocket] = set()
self.subscriptions: Dict[str, Set[WebSocket]] = {}
async def connect_socket(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.add(websocket)
def disconnect_socket(self, websocket: WebSocket):
self.active_connections.remove(websocket)
for subscribers in self.subscriptions.values():
subscribers.discard(websocket)
async def subscribe_graph_exec(
self, *, user_id: str, graph_exec_id: str, websocket: WebSocket
) -> str:
return await self._subscribe(
_graph_exec_channel_key(user_id, graph_exec_id=graph_exec_id), websocket
)
async def subscribe_graph_execs(
self, *, user_id: str, graph_id: str, websocket: WebSocket
) -> str:
return await self._subscribe(
_graph_execs_channel_key(user_id, graph_id=graph_id), websocket
)
async def unsubscribe_graph_exec(
self, *, user_id: str, graph_exec_id: str, websocket: WebSocket
) -> str | None:
return await self._unsubscribe(
_graph_exec_channel_key(user_id, graph_exec_id=graph_exec_id), websocket
)
async def unsubscribe_graph_execs(
self, *, user_id: str, graph_id: str, websocket: WebSocket
) -> str | None:
return await self._unsubscribe(
_graph_execs_channel_key(user_id, graph_id=graph_id), websocket
)
async def send_execution_update(
self, exec_event: GraphExecutionEvent | NodeExecutionEvent
) -> int:
graph_exec_id = (
exec_event.id
if isinstance(exec_event, GraphExecutionEvent)
else exec_event.graph_exec_id
)
n_sent = 0
channels: set[str] = {
# Send update to listeners for this graph execution
_graph_exec_channel_key(exec_event.user_id, graph_exec_id=graph_exec_id)
}
if isinstance(exec_event, GraphExecutionEvent):
# Send update to listeners for all executions of this graph
channels.add(
_graph_execs_channel_key(
exec_event.user_id, graph_id=exec_event.graph_id
)
)
for channel in channels.intersection(self.subscriptions.keys()):
message = WSMessage(
method=_EVENT_TYPE_TO_METHOD_MAP[exec_event.event_type],
channel=channel,
data=exec_event.model_dump(),
).model_dump_json()
for connection in self.subscriptions[channel]:
await connection.send_text(message)
n_sent += 1
return n_sent
async def _subscribe(self, channel_key: str, websocket: WebSocket) -> str:
if channel_key not in self.subscriptions:
self.subscriptions[channel_key] = set()
self.subscriptions[channel_key].add(websocket)
return channel_key
async def _unsubscribe(self, channel_key: str, websocket: WebSocket) -> str | None:
if channel_key in self.subscriptions:
self.subscriptions[channel_key].discard(websocket)
if not self.subscriptions[channel_key]:
del self.subscriptions[channel_key]
return channel_key
return None
def _graph_exec_channel_key(user_id: str, *, graph_exec_id: str) -> str:
return f"{user_id}|graph_exec#{graph_exec_id}"
def _graph_execs_channel_key(user_id: str, *, graph_id: str) -> str:
return f"{user_id}|graph#{graph_id}|executions"
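# Minimal sketch (not part of the original module): the helpers above encode the
# subscription channel names; the identifiers below are illustrative only.
if __name__ == "__main__":
    print(_graph_exec_channel_key("user-1", graph_exec_id="exec-42"))
    # expected: user-1|graph_exec#exec-42
    print(_graph_execs_channel_key("user-1", graph_id="graph-7"))
    # expected: user-1|graph#graph-7|executions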
|
from typing import Dict, Set
from fastapi import WebSocket
from backend.data.execution import (
ExecutionEventType,
GraphExecutionEvent,
NodeExecutionEvent,
)
from backend.server.model import WSMessage, WSMethod
_EVENT_TYPE_TO_METHOD_MAP: dict[ExecutionEventType, WSMethod] = {
ExecutionEventType.GRAPH_EXEC_UPDATE: WSMethod.GRAPH_EXECUTION_EVENT,
ExecutionEventType.NODE_EXEC_UPDATE: WSMethod.NODE_EXECUTION_EVENT,
}
class ConnectionManager:
def __init__(self):
self.active_connections: Set[WebSocket] = set()
self.subscriptions: Dict[str, Set[WebSocket]] = {}
async def connect_socket(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.add(websocket)
def disconnect_socket(self, websocket: WebSocket):
self.active_connections.remove(websocket)
for subscribers in self.subscriptions.values():
subscribers.discard(websocket)
async def subscribe_graph_exec(
self, *, user_id: str, graph_exec_id: str, websocket: WebSocket
) -> str:
key = _graph_exec_channel_key(user_id, graph_exec_id)
if key not in self.subscriptions:
self.subscriptions[key] = set()
self.subscriptions[key].add(websocket)
return key
async def unsubscribe(
self, *, user_id: str, graph_exec_id: str, websocket: WebSocket
) -> str | None:
key = _graph_exec_channel_key(user_id, graph_exec_id)
if key in self.subscriptions:
self.subscriptions[key].discard(websocket)
if not self.subscriptions[key]:
del self.subscriptions[key]
return key
return None
async def send_execution_update(
self, exec_event: GraphExecutionEvent | NodeExecutionEvent
) -> int:
graph_exec_id = (
exec_event.id
if isinstance(exec_event, GraphExecutionEvent)
else exec_event.graph_exec_id
)
key = _graph_exec_channel_key(exec_event.user_id, graph_exec_id)
n_sent = 0
if key in self.subscriptions:
message = WSMessage(
method=_EVENT_TYPE_TO_METHOD_MAP[exec_event.event_type],
channel=key,
data=exec_event.model_dump(),
).model_dump_json()
for connection in self.subscriptions[key]:
await connection.send_text(message)
n_sent += 1
return n_sent
def _graph_exec_channel_key(user_id: str, graph_exec_id: str) -> str:
return f"{user_id}|graph_exec#{graph_exec_id}"
|
import os
import urllib
import urllib.parse
import urllib.request
from typing import TYPE_CHECKING, Optional, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUrl')
@_register_proto(proto_type_name='any_url')
class AnyUrl(BaseAnyUrl, AbstractType):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text=str(self), type=self._proto_type_name)
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file
path without prefix.
If `scheme` is missing, we assume it is a local file path.
"""
scheme = parts['scheme']
if scheme is None:
# allow missing scheme, unlike pydantic
pass
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**_kwargs: str,
) -> str:
"""
Build a URL from its parts.
The only difference from the pydantic implementation is that we allow
missing `scheme`, making it possible to pass a file path without prefix.
"""
# allow missing scheme, unlike pydantic
scheme_ = scheme if scheme is not None else ''
url = super().build(
scheme=scheme_,
user=user,
password=password,
host=host,
port=port,
path=path,
query=query,
fragment=fragment,
**_kwargs,
)
if scheme is None and url.startswith('://'):
# remove the `://` prefix, since scheme is missing
url = url[3:]
return url
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read url from a proto msg
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
def load_bytes(self, timeout: Optional[float] = None) -> bytes:
"""Convert url to bytes. This will either load or download the file and save
it into a bytes object.
:param uri: the URI of Document. Can be a local file path or a (remote) URL
:param timeout: timeout for urlopen. Only relevant if URI is not local
:return: bytes.
"""
if urllib.parse.urlparse(self).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(self, headers={'User-Agent': 'Mozilla/5.0'})
urlopen_kwargs = {'timeout': timeout} if timeout is not None else {}
with urllib.request.urlopen(req, **urlopen_kwargs) as fp: # type: ignore
return fp.read()
elif os.path.exists(self):
with open(self, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'`{self}` is not a URL or a valid local path')
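# Hedged usage sketch (not part of the original module): AnyUrl accepts plain local paths
# because the scheme is optional; the file name below is hypothetical, and load_bytes()
# raises FileNotFoundError if the path does not exist locally.
if __name__ == '__main__':
    url = parse_obj_as(AnyUrl, 'data/sample.txt')  # no scheme -> treated as a local path
    print(str(url))  # expected: data/sample.txt
    data = url.load_bytes()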
|
from typing import TYPE_CHECKING, Optional, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUrl')
@_register_proto(proto_type_name='any_url')
class AnyUrl(BaseAnyUrl, AbstractType):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text=str(self), type=self._proto_type_name)
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file
path without prefix.
If `scheme` is missing, we assume it is a local file path.
"""
scheme = parts['scheme']
if scheme is None:
# allow missing scheme, unlike pydantic
pass
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**_kwargs: str,
) -> str:
"""
Build a URL from its parts.
The only difference from the pydantic implementation is that we allow
missing `scheme`, making it possible to pass a file path without prefix.
"""
# allow missing scheme, unlike pydantic
scheme_ = scheme if scheme is not None else ''
url = super().build(
scheme=scheme_,
user=user,
password=password,
host=host,
port=port,
path=path,
query=query,
fragment=fragment,
**_kwargs,
)
if scheme is None and url.startswith('://'):
# remove the `://` prefix, since scheme is missing
url = url[3:]
return url
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read url from a proto msg
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and bicluster
it using the :class:`~sklearn.cluster.SpectralBiclustering` algorithm. The
spectral biclustering algorithm is specifically designed to cluster data by
simultaneously considering both the rows (samples) and columns (features) of a
matrix. It aims to identify patterns not only between samples but also within
subsets of samples, allowing for the detection of localized structure within the
data. This makes spectral biclustering particularly well-suited for datasets
where the order or arrangement of features is fixed, such as in images, time
series, or genomes.
The data is generated, then shuffled and passed to the spectral biclustering
algorithm. The rows and columns of the shuffled matrix are then rearranged to
plot the biclusters found.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Generate sample data
# --------------------
# We generate the sample data using the
# :func:`~sklearn.datasets.make_checkerboard` function. Each pixel within
# `shape=(300, 300)` represents with its color a value from a uniform
# distribution. The noise is added from a normal distribution, where the value
# chosen for `noise` is the standard deviation.
#
# As you can see, the data is distributed over 12 cluster cells and is
# relatively well distinguishable.
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=42
)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
_ = plt.show()
# %%
# We shuffle the data and the goal is to reconstruct it afterwards using
# :class:`~sklearn.cluster.SpectralBiclustering`.
import numpy as np
# Creating lists of shuffled row and column indices
rng = np.random.RandomState(0)
row_idx_shuffled = rng.permutation(data.shape[0])
col_idx_shuffled = rng.permutation(data.shape[1])
# %%
# We redefine the shuffled data and plot it. We observe that we lost the
# structure of the original data matrix.
data = data[row_idx_shuffled][:, col_idx_shuffled]
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
_ = plt.show()
# %%
# Fitting `SpectralBiclustering`
# ------------------------------
# We fit the model and compare the obtained clusters with the ground truth. Note
# that when creating the model we specify the same number of clusters that we
# used to create the dataset (`n_clusters = (4, 3)`), which will contribute to
# obtaining a good result.
from sklearn.cluster import SpectralBiclustering
from sklearn.metrics import consensus_score
model = SpectralBiclustering(n_clusters=n_clusters, method="log", random_state=0)
model.fit(data)
# Compute the similarity of two sets of biclusters
score = consensus_score(
model.biclusters_, (rows[:, row_idx_shuffled], columns[:, col_idx_shuffled])
)
print(f"consensus score: {score:.1f}")
# %%
# The score is between 0 and 1, where 1 corresponds to a perfect matching. It
# shows the quality of the biclustering.
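# %%
# As a quick sanity check (an illustrative addition to the original example),
# comparing a set of biclusters with itself yields the maximal score of 1.0.
perfect_score = consensus_score(model.biclusters_, model.biclusters_)
print(f"consensus score of a perfect match: {perfect_score:.1f}")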
# %%
# Plotting results
# ----------------
# Now, we rearrange the data based on the row and column labels assigned by the
# :class:`~sklearn.cluster.SpectralBiclustering` model in ascending order and
# plot again. The `row_labels_` range from 0 to 3, while the `column_labels_`
# range from 0 to 2, representing a total of 4 clusters per row and 3 clusters
# per column.
# Reordering first the rows and then the columns.
reordered_rows = data[np.argsort(model.row_labels_)]
reordered_data = reordered_rows[:, np.argsort(model.column_labels_)]
plt.matshow(reordered_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
_ = plt.show()
# %%
# As a last step, we want to demonstrate the relationships between the row
# and column labels assigned by the model. Therefore, we create a grid with
# :func:`numpy.outer`, which takes the sorted `row_labels_` and `column_labels_`
# and adds 1 to each to ensure that the labels start from 1 instead of 0 for
# better visualization.
plt.matshow(
np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues,
)
plt.title("Checkerboard structure of rearranged data")
plt.show()
# %%
# The outer product of the row and column label vectors shows a representation
# of the checkerboard structure, where different combinations of row and column
# labels are represented by different shades of blue.
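# %%
# To make this encoding concrete (illustrative addition): the outer product of
# two small label vectors already produces the block pattern seen above.
print(np.outer(np.array([1, 2]), np.array([1, 2, 3])))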
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and bicluster
it using the :class:`~sklearn.cluster.SpectralBiclustering` algorithm. The
spectral biclustering algorithm is specifically designed to cluster data by
simultaneously considering both the rows (samples) and columns (features) of a
matrix. It aims to identify patterns not only between samples but also within
subsets of samples, allowing for the detection of localized structure within the
data. This makes spectral biclustering particularly well-suited for datasets
where the order or arrangement of features is fixed, such as in images, time
series, or genomes.
The data is generated, then shuffled and passed to the spectral biclustering
algorithm. The rows and columns of the shuffled matrix are then rearranged to
plot the biclusters found.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Generate sample data
# --------------------
# We generate the sample data using the
# :func:`~sklearn.datasets.make_checkerboard` function. Each pixel within
# `shape=(300, 300)` represents with its color a value from a uniform
# distribution. The noise is added from a normal distribution, where the value
# chosen for `noise` is the standard deviation.
#
# As you can see, the data is distributed over 12 cluster cells and is
# relatively well distinguishable.
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=42
)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
_ = plt.show()
# %%
# We shuffle the data and the goal is to reconstruct it afterwards using
# :class:`~sklearn.cluster.SpectralBiclustering`.
import numpy as np
# Creating lists of shuffled row and column indices
rng = np.random.RandomState(0)
row_idx_shuffled = rng.permutation(data.shape[0])
col_idx_shuffled = rng.permutation(data.shape[1])
# %%
# We redefine the shuffled data and plot it. We observe that we lost the
# structure of the original data matrix.
data = data[row_idx_shuffled][:, col_idx_shuffled]
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
_ = plt.show()
# %%
# Fitting `SpectralBiclustering`
# ------------------------------
# We fit the model and compare the obtained clusters with the ground truth. Note
# that when creating the model we specify the same number of clusters that we
# used to create the dataset (`n_clusters = (4, 3)`), which will contribute to
# obtaining a good result.
from sklearn.cluster import SpectralBiclustering
from sklearn.metrics import consensus_score
model = SpectralBiclustering(n_clusters=n_clusters, method="log", random_state=0)
model.fit(data)
# Compute the similarity of two sets of biclusters
score = consensus_score(
model.biclusters_, (rows[:, row_idx_shuffled], columns[:, col_idx_shuffled])
)
print(f"consensus score: {score:.1f}")
# %%
# The score is between 0 and 1, where 1 corresponds to a perfect matching. It
# shows the quality of the biclustering.
# %%
# Plotting results
# ----------------
# Now, we rearrange the data based on the row and column labels assigned by the
# :class:`~sklearn.cluster.SpectralBiclustering` model in ascending order and
# plot again. The `row_labels_` range from 0 to 3, while the `column_labels_`
# range from 0 to 2, representing a total of 4 clusters per row and 3 clusters
# per column.
# Reordering first the rows and then the columns.
reordered_rows = data[np.argsort(model.row_labels_)]
reordered_data = reordered_rows[:, np.argsort(model.column_labels_)]
plt.matshow(reordered_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
_ = plt.show()
# %%
# As a last step, we want to demonstrate the relationships between the row
# and column labels assigned by the model. Therefore, we create a grid with
# :func:`numpy.outer`, which takes the sorted `row_labels_` and `column_labels_`
# and adds 1 to each to ensure that the labels start from 1 instead of 0 for
# better visualization.
plt.matshow(
np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues,
)
plt.title("Checkerboard structure of rearranged data")
plt.show()
# %%
# The outer product of the row and column label vectors shows a representation
# of the checkerboard structure, where different combinations of row and column
# labels are represented by different shades of blue.
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# training schedule for 1x
train_cfg = dict(by_epoch=True, max_epochs=12)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)))
default_hooks = dict(
optimizer=dict(
_delete_=True,
type='OptimizerHook',
grad_clip=dict(max_norm=35, norm_type=2)))
|
import defusedxml.ElementTree as ET
import pytest
from llama_index.readers.file.xml import XMLReader
# Sample XML data for testing
SAMPLE_XML = """<?xml version="1.0" encoding="UTF-8"?>
<data>
<item type="fruit">
<name>Apple</name>
<color>Red</color>
<price>1.20</price>
</item>
<item type="vegetable">
<name>Carrot</name>
<color>Orange</color>
<price>0.50</price>
</item>
<item type="fruit">
<name>Banana</name>
<color>Yellow</color>
<price>0.30</price>
</item>
<company>
<name>Fresh Produce Ltd.</name>
<address>
<street>123 Green Lane</street>
<city>Garden City</city>
<state>Harvest</state>
<zip>54321</zip>
</address>
</company>
</data>"""
# Fixture to create a temporary XML file
@pytest.fixture()
def xml_file(tmp_path):
file = tmp_path / "test.xml"
with open(file, "w") as f:
f.write(SAMPLE_XML)
return file
def test_xml_reader_init():
reader = XMLReader(tree_level_split=2)
assert reader.tree_level_split == 2
def test_parse_xml_to_document():
reader = XMLReader(1)
root = ET.fromstring(SAMPLE_XML)
documents = reader._parse_xmlelt_to_document(root)
assert "Fresh Produce Ltd." in documents[-1].text
assert "fruit" in documents[0].text
def test_load_data_xml(xml_file):
reader = XMLReader()
documents = reader.load_data(xml_file)
assert len(documents) == 1
assert "Apple" in documents[0].text
assert "Garden City" in documents[0].text
|
import xml.etree.ElementTree as ET
import pytest
from llama_index.readers.file.xml import XMLReader
# Sample XML data for testing
SAMPLE_XML = """<?xml version="1.0" encoding="UTF-8"?>
<data>
<item type="fruit">
<name>Apple</name>
<color>Red</color>
<price>1.20</price>
</item>
<item type="vegetable">
<name>Carrot</name>
<color>Orange</color>
<price>0.50</price>
</item>
<item type="fruit">
<name>Banana</name>
<color>Yellow</color>
<price>0.30</price>
</item>
<company>
<name>Fresh Produce Ltd.</name>
<address>
<street>123 Green Lane</street>
<city>Garden City</city>
<state>Harvest</state>
<zip>54321</zip>
</address>
</company>
</data>"""
# Fixture to create a temporary XML file
@pytest.fixture()
def xml_file(tmp_path):
file = tmp_path / "test.xml"
with open(file, "w") as f:
f.write(SAMPLE_XML)
return file
def test_xml_reader_init():
reader = XMLReader(tree_level_split=2)
assert reader.tree_level_split == 2
def test_parse_xml_to_document():
reader = XMLReader(1)
root = ET.fromstring(SAMPLE_XML)
documents = reader._parse_xmlelt_to_document(root)
assert "Fresh Produce Ltd." in documents[-1].text
assert "fruit" in documents[0].text
def test_load_data_xml(xml_file):
reader = XMLReader()
documents = reader.load_data(xml_file)
assert len(documents) == 1
assert "Apple" in documents[0].text
assert "Garden City" in documents[0].text
|
"""Callback Handler that tracks AIMessage.usage_metadata."""
import threading
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Optional
from langchain_core._api import beta
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.outputs import ChatGeneration, LLMResult
@beta()
class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
callback = UsageMetadataCallbackHandler()
result_1 = llm_1.invoke("Hello", config={"callbacks": [callback]})
result_2 = llm_2.invoke("Hello", config={"callbacks": [callback]})
callback.usage_metadata
.. code-block:: none
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
.. versionadded:: 0.3.49
"""
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: dict[str, UsageMetadata] = {}
def __repr__(self) -> str:
return str(self.usage_metadata)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
usage_metadata = None
model_name = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
model_name = message.response_metadata.get("model_name")
except AttributeError:
pass
# update shared state behind lock
if usage_metadata and model_name:
with self._lock:
if model_name not in self.usage_metadata:
self.usage_metadata[model_name] = usage_metadata
else:
self.usage_metadata[model_name] = add_usage(
self.usage_metadata[model_name], usage_metadata
)
@contextmanager
@beta()
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
Args:
name (str): The name of the context variable. Defaults to
``"usage_metadata_callback"``.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm_1 = init_chat_model(model="openai:gpt-4o-mini")
llm_2 = init_chat_model(model="anthropic:claude-3-5-haiku-latest")
with get_usage_metadata_callback() as cb:
llm_1.invoke("Hello")
llm_2.invoke("Hello")
print(cb.usage_metadata)
.. code-block:: none
{'gpt-4o-mini-2024-07-18': {'input_tokens': 8,
'output_tokens': 10,
'total_tokens': 18,
'input_token_details': {'audio': 0, 'cache_read': 0},
'output_token_details': {'audio': 0, 'reasoning': 0}},
'claude-3-5-haiku-20241022': {'input_tokens': 8,
'output_tokens': 21,
'total_tokens': 29,
'input_token_details': {'cache_read': 0, 'cache_creation': 0}}}
.. versionadded:: 0.3.49
"""
from langchain_core.tracers.context import register_configure_hook
usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
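# ---------------------------------------------------------------------------
# Minimal sketch (illustrative addition, not part of the original module) of
# how `add_usage` merges the per-model counters accumulated above; the token
# counts are made up.
if __name__ == "__main__":
    first = UsageMetadata(input_tokens=8, output_tokens=10, total_tokens=18)
    second = UsageMetadata(input_tokens=8, output_tokens=21, total_tokens=29)
    print(add_usage(first, second))  # fields are summed: 16 / 31 / 47 in total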
|
"""Callback Handler that tracks AIMessage.usage_metadata."""
import threading
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Optional
from langchain_core._api import beta
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.outputs import ChatGeneration, LLMResult
@beta()
class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm = init_chat_model(model="openai:gpt-4o-mini")
callback = UsageMetadataCallbackHandler()
results = llm.batch(["Hello", "Goodbye"], config={"callbacks": [callback]})
print(callback.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: dict[str, UsageMetadata] = {}
def __repr__(self) -> str:
return str(self.usage_metadata)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
usage_metadata = None
model_name = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
model_name = message.response_metadata.get("model_name")
except AttributeError:
pass
# update shared state behind lock
if usage_metadata and model_name:
with self._lock:
if model_name not in self.usage_metadata:
self.usage_metadata[model_name] = usage_metadata
else:
self.usage_metadata[model_name] = add_usage(
self.usage_metadata[model_name], usage_metadata
)
@contextmanager
@beta()
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
Args:
name (str): The name of the context variable. Defaults to
``"usage_metadata_callback"``.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm = init_chat_model(model="openai:gpt-4o-mini")
with get_usage_metadata_callback() as cb:
llm.invoke("Hello")
llm.invoke("Goodbye")
print(cb.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
from langchain_core.tracers.context import register_configure_hook
usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
|
"""This modules defines all kinds of exceptions raised in Jina."""
from typing import Set, Union
import grpc.aio
class BaseJinaException(BaseException):
"""A base class for all exceptions raised by Jina"""
class RuntimeFailToStart(SystemError, BaseJinaException):
"""When pod/deployment is failed to started."""
class RuntimeTerminated(KeyboardInterrupt, BaseJinaException):
"""The event loop of BasePod ends."""
class FlowTopologyError(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
class FlowMissingDeploymentError(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
class FlowBuildLevelError(Exception, BaseJinaException):
"""Flow exception when required build level is higher than the current build level."""
class BadConfigSource(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
class BadClient(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
class BadServer(Exception, BaseJinaException):
"""Error happens on the server side."""
class BadClientCallback(BadClient, BaseJinaException):
"""Error in the callback function on the client side."""
class BadClientInput(BadClient, BaseJinaException):
"""Error in the request generator function on the client side."""
class BadRequestType(TypeError, BaseJinaException):
"""Exception when can not construct a request object from given data."""
class BadImageNameError(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
class BadYAMLVersion(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
class NotSupportedError(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
class RuntimeRunForeverEarlyError(Exception, BaseJinaException):
"""Raised when an error occurs when starting the run_forever of Runtime"""
class DockerVersionError(SystemError, BaseJinaException):
"""Raised when the docker version is incompatible"""
class NoContainerizedError(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
class PortAlreadyUsed(RuntimeError, BaseJinaException):
"""Raised when trying to use a port which is already used"""
class EstablishGrpcConnectionError(Exception, BaseJinaException):
"""Raised when Exception occurs when establishing or resetting gRPC connection"""
class InternalNetworkError(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
return self._details if self._details else self.og_exception.details()
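# ---------------------------------------------------------------------------
# Minimal sketch (illustrative addition, not part of the original module) of
# wrapping a low-level gRPC error so that request context travels with it; all
# values below are made up, and the AioRpcError construction assumes the
# current grpc.aio constructor signature.
if __name__ == '__main__':
    og = grpc.aio.AioRpcError(
        grpc.StatusCode.UNAVAILABLE,
        grpc.aio.Metadata(),
        grpc.aio.Metadata(),
        details='connection refused',
        debug_error_string='',
    )
    err = InternalNetworkError(
        og, request_id='req-0', dest_addr={'executor0:8081'}, details='all retries failed'
    )
    print(err)         # 'all retries failed' (falls back to og.details() when empty)
    print(err.code())  # StatusCode.UNAVAILABLE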
|
"""This modules defines all kinds of exceptions raised in Jina."""
from typing import Set, Union
import grpc.aio
class BaseJinaException(BaseException):
"""A base class for all exceptions raised by Jina"""
class RuntimeFailToStart(SystemError, BaseJinaException):
"""When pod/deployment is failed to started."""
class RuntimeTerminated(KeyboardInterrupt, BaseJinaException):
"""The event loop of BasePod ends."""
class FlowTopologyError(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
class FlowMissingDeploymentError(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
class FlowBuildLevelError(Exception, BaseJinaException):
"""Flow exception when required build level is higher than the current build level."""
class BadConfigSource(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
class BadClient(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
class BadServer(Exception, BaseJinaException):
"""Error happens on the server side."""
class BadClientCallback(BadClient, BaseJinaException):
"""Error in the callback function on the client side."""
class BadClientInput(BadClient, BaseJinaException):
"""Error in the request generator function on the client side."""
class BadRequestType(TypeError, BaseJinaException):
"""Exception when can not construct a request object from given data."""
class BadImageNameError(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
class BadYAMLVersion(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
class NotSupportedError(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
class RuntimeRunForeverEarlyError(Exception, BaseJinaException):
"""Raised when an error occurs when starting the run_forever of Runtime"""
class DockerVersionError(SystemError, BaseJinaException):
"""Raised when the docker version is incompatible"""
class NoContainerizedError(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
class PortAlreadyUsed(RuntimeError, BaseJinaException):
"""Raised when to use a port which is already used"""
class InternalNetworkError(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
return self._details if self._details else self.og_exception.details()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple
import torch
from torch import Tensor
from mmdet.structures.bbox import BaseBoxes
def anchor_inside_flags(flat_anchors: Tensor,
valid_flags: Tensor,
img_shape: Tuple[int],
allowed_border: int = 0) -> Tensor:
"""Check whether the anchors are inside the border.
Args:
flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
valid_flags (torch.Tensor): An existing valid flags of anchors.
img_shape (tuple(int)): Shape of current image.
allowed_border (int): The border to allow the valid anchor.
Defaults to 0.
Returns:
torch.Tensor: Flags indicating whether the anchors are inside a \
valid range.
"""
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
if isinstance(flat_anchors, BaseBoxes):
inside_flags = valid_flags & \
flat_anchors.is_inside([img_h, img_w],
all_inside=True,
allowed_border=allowed_border)
else:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
return inside_flags
def calc_region(bbox: Tensor,
ratio: float,
featmap_size: Optional[Tuple] = None) -> Tuple[int]:
"""Calculate a proportional bbox region.
    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.
Args:
bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
ratio (float): Ratio of the output region.
featmap_size (tuple, Optional): Feature map size in (height, width)
order used for clipping the boundary. Defaults to None.
Returns:
tuple: x1, y1, x2, y2
"""
x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1])
y1 = y1.clamp(min=0, max=featmap_size[0])
x2 = x2.clamp(min=0, max=featmap_size[1])
y2 = y2.clamp(min=0, max=featmap_size[0])
return (x1, y1, x2, y2)
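# ---------------------------------------------------------------------------
# Worked example (illustrative addition, not part of the original module):
# with ratio=0.2 the region keeps the central part of the box, moving each
# corner 20% of the way towards the opposite side.
if __name__ == '__main__':
    bbox = torch.tensor([0., 0., 100., 100.])
    print(calc_region(bbox, ratio=0.2))  # (tensor(20), tensor(20), tensor(80), tensor(80))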
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.structures.bbox import BaseBoxes
def anchor_inside_flags(flat_anchors,
valid_flags,
img_shape,
allowed_border=0):
"""Check whether the anchors are inside the border.
Args:
flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
valid_flags (torch.Tensor): An existing valid flags of anchors.
img_shape (tuple(int)): Shape of current image.
allowed_border (int, optional): The border to allow the valid anchor.
Defaults to 0.
Returns:
torch.Tensor: Flags indicating whether the anchors are inside a \
valid range.
"""
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
if isinstance(flat_anchors, BaseBoxes):
inside_flags = valid_flags & \
flat_anchors.is_inside([img_h, img_w],
all_inside=True,
allowed_border=allowed_border)
else:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border) & \
(flat_anchors[:, 1] >= -allowed_border) & \
(flat_anchors[:, 2] < img_w + allowed_border) & \
(flat_anchors[:, 3] < img_h + allowed_border)
else:
inside_flags = valid_flags
return inside_flags
def calc_region(bbox, ratio, featmap_size=None):
"""Calculate a proportional bbox region.
    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.
Args:
bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
ratio (float): Ratio of the output region.
featmap_size (tuple): Feature map size used for clipping the boundary.
Returns:
tuple: x1, y1, x2, y2
"""
x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1])
y1 = y1.clamp(min=0, max=featmap_size[0])
x2 = x2.clamp(min=0, max=featmap_size[1])
y2 = y2.clamp(min=0, max=featmap_size[0])
return (x1, y1, x2, y2)
|
from __future__ import annotations
import collections
import json
import logging
import os
import string
from typing import Iterable
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed in e.g. the Google News word2vec file, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info(f"PhraseTokenizer - Phrase ngram lengths: {self.ngram_lengths}")
logger.info(f"PhraseTokenizer - Num phrases: {len(self.ngram_lookup)}")
def tokenize(self, text: str, **kwargs) -> list[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json")) as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
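# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module):
# assumes nltk is installed. The tiny vocab is made up; "New York" is merged
# into the single phrase token "New_York" and stop words are dropped.
if __name__ == "__main__":
    tokenizer = PhraseTokenizer(vocab=["New_York", "city"])
    print(tokenizer.tokenize("New York is a city"))  # -> [0, 1]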
|
from __future__ import annotations
import collections
import json
import logging
import os
import string
from typing import Iterable
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed in e.g. the Google News word2vec file, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str, **kwargs) -> list[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='CenterNet',
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5,
# There is a chance to get 40.3 after switching init_cfg,
# otherwise it is about 39.9~40.1
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
relu_before_extra_convs=True),
bbox_head=dict(
type='CenterNetUpdateHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
hm_min_radius=4,
hm_min_overlap=0.8,
more_pos_thresh=0.2,
more_pos_topk=9,
soft_weight_on_reg=False,
loss_cls=dict(
type='GaussianFocalLoss',
pos_weight=0.25,
neg_weight=0.75,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# single-scale training is about 39.3
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00025,
by_epoch=False,
begin=0,
end=4000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
optim_wrapper = dict(
optimizer=dict(lr=0.01),
# Experiments show that there is no need to turn on clip_grad.
paramwise_cfg=dict(norm_decay_mult=0.))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='CenterNet',
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5,
# There is a chance to get 40.3 after switching init_cfg,
# otherwise it is about 39.9~40.1
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
relu_before_extra_convs=True),
bbox_head=dict(
type='CenterNetUpdateHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
hm_min_radius=4,
hm_min_overlap=0.8,
more_pos_thresh=0.2,
more_pos_topk=9,
soft_weight_on_reg=False,
loss_cls=dict(
type='GaussianFocalLoss',
pos_weight=0.25,
neg_weight=0.75,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
),
train_cfg=None,
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# single-scale training is about 39.3
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00025,
by_epoch=False,
begin=0,
end=4000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
optim_wrapper = dict(
optimizer=dict(lr=0.01),
# Experiments show that there is no need to turn on clip_grad.
paramwise_cfg=dict(norm_decay_mult=0.))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CocoDataset
class TestCocoDataset(unittest.TestCase):
def test_coco_dataset(self):
# test CocoDataset
metainfo = dict(CLASSES=('bus', 'car'), task_name='new_task')
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=[])
self.assertEqual(dataset.metainfo['CLASSES'], ('bus', 'car'))
self.assertEqual(dataset.metainfo['task_name'], 'new_task')
def test_coco_dataset_without_filter_cfg(self):
# test CocoDataset without filter_cfg
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
pipeline=[])
self.assertEqual(len(dataset), 2)
def test_coco_annotation_ids_unique(self):
# test annotation ids not unique error
metainfo = dict(CLASSES=('car', ), task_name='new_task')
with self.assertRaisesRegex(AssertionError, 'are not unique!'):
CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_wrong_format_sample.json',
metainfo=metainfo,
pipeline=[])
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.datasets import CocoDataset
class TestCocoDataset:
def test_coco_dataset(self):
# test CocoDataset
metainfo = dict(CLASSES=('bus', 'car'), task_name='new_task')
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=[])
assert dataset.metainfo['CLASSES'] == ('bus', 'car')
assert dataset.metainfo['task_name'] == 'new_task'
def test_coco_dataset_without_filter_cfg(self):
# test CocoDataset without filter_cfg
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
pipeline=[])
assert len(dataset) == 2
def test_coco_annotation_ids_unique(self):
# test annotation ids not unique error
metainfo = dict(CLASSES=('car', ), task_name='new_task')
with pytest.raises(AssertionError):
CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_wrong_format_sample.json',
metainfo=metainfo,
pipeline=[])
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import sys
import tempfile
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.Tensor
pred_original_sample: Optional[torch.Tensor] = None
"""
class CopyCheckTester(unittest.TestCase):
def setUp(self):
self.diffusers_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
check_copies.DIFFUSERS_PATH = self.diffusers_dir
shutil.copy(
os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
)
def tearDown(self):
check_copies.DIFFUSERS_PATH = "src/diffusers"
shutil.rmtree(self.diffusers_dir)
def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
code = check_copies.run_ruff(code)
fname = os.path.join(self.diffusers_dir, "new_code.py")
with open(fname, "w", newline="\n") as f:
f.write(code)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
else:
check_copies.is_copy_consistent(f.name, overwrite=True)
with open(fname, "r") as f:
self.assertTrue(f.read(), expected)
def test_find_code_in_diffusers(self):
code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
self.assertEqual(code, REFERENCE_CODE)
def test_is_copy_consistent(self):
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
"DDPMSchedulerOutput",
REFERENCE_CODE + "\n",
)
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
"DDPMSchedulerOutput",
REFERENCE_CODE,
)
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
"TestSchedulerOutput",
re.sub("DDPM", "Test", REFERENCE_CODE),
)
# Copy consistency with a really long name
long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
f"{long_class_name}SchedulerOutput",
re.sub("Bert", long_class_name, REFERENCE_CODE),
)
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
"TestSchedulerOutput",
REFERENCE_CODE,
overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
import sys
import tempfile
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.Tensor
pred_original_sample: Optional[torch.Tensor] = None
"""
class CopyCheckTester(unittest.TestCase):
def setUp(self):
self.diffusers_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
check_copies.DIFFUSERS_PATH = self.diffusers_dir
shutil.copy(
os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
)
def tearDown(self):
check_copies.DIFFUSERS_PATH = "src/diffusers"
shutil.rmtree(self.diffusers_dir)
def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
code = check_copies.run_ruff(code)
fname = os.path.join(self.diffusers_dir, "new_code.py")
with open(fname, "w", newline="\n") as f:
f.write(code)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
else:
check_copies.is_copy_consistent(f.name, overwrite=True)
with open(fname, "r") as f:
self.assertTrue(f.read(), expected)
def test_find_code_in_diffusers(self):
code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
self.assertEqual(code, REFERENCE_CODE)
def test_is_copy_consistent(self):
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
"DDPMSchedulerOutput",
REFERENCE_CODE + "\n",
)
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
"DDPMSchedulerOutput",
REFERENCE_CODE,
)
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
"TestSchedulerOutput",
re.sub("DDPM", "Test", REFERENCE_CODE),
)
# Copy consistency with a really long name
long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
f"{long_class_name}SchedulerOutput",
re.sub("Bert", long_class_name, REFERENCE_CODE),
)
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
"TestSchedulerOutput",
REFERENCE_CODE,
overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
)
|
_base_ = [
'./faster-rcnn_r50_fpn.py', './mot_challenge.py',
'../../../configs/_base_/default_runtime.py'
]
model = dict(
type='Tracktor',
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth', # noqa: E501
reid= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot17-4bf6b63d.pth' # noqa: E501
),
detector=dict(
rpn_head=dict(bbox_coder=dict(clip_border=False)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(
clip_border=False), num_classes=1))),
reid=dict(
type='BaseReID',
backbone=dict(
type='ResNet',
depth=18,
base_channels=2,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=16,
fc_channels=32,
out_channels=16,
num_classes=8,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
loss_pairwise=dict(
type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
motion=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='TracktorTracker',
obj_score_thr=0.5,
regression=dict(
obj_score_thr=0.5,
nms=dict(type='nms', iou_threshold=0.6),
match_iou_thr=0.3),
reid=dict(
num_samples=10,
img_scale=(256, 128),
img_norm_cfg=None,
match_score_thr=2.0,
match_iou_thr=0.2),
momentums=None,
num_frames_retain=10))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=100,
warmup_ratio=1.0 / 100,
step=[3])
# runtime settings
total_epochs = 4
evaluation = dict(metric=['bbox', 'track'], interval=1)
search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML']
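For reference, a minimal sketch (the config path is illustrative) of how a config like the one above is typically loaded; mmcv's Config.fromfile resolves the _base_ inheritance chain declared at the top of the file:

from mmcv import Config

# Illustrative path; point it at the Tracktor config in your checkout.
cfg = Config.fromfile('configs/mot/tracktor/my_tracktor_config.py')
print(cfg.model.type)                   # 'Tracktor'
print(cfg.model.tracker.obj_score_thr)  # 0.5
print(cfg.total_epochs)                 # 4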
|
_base_ = [
'./faster_rcnn_r50_fpn.py', './mot_challenge.py',
'../../../configs/_base_/default_runtime.py'
]
model = dict(
type='Tracktor',
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth', # noqa: E501
reid= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot17-4bf6b63d.pth' # noqa: E501
),
detector=dict(
rpn_head=dict(bbox_coder=dict(clip_border=False)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(
clip_border=False), num_classes=1))),
reid=dict(
type='BaseReID',
backbone=dict(
type='ResNet',
depth=18,
base_channels=2,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=16,
fc_channels=32,
out_channels=16,
num_classes=8,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
loss_pairwise=dict(
type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
motion=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='TracktorTracker',
obj_score_thr=0.5,
regression=dict(
obj_score_thr=0.5,
nms=dict(type='nms', iou_threshold=0.6),
match_iou_thr=0.3),
reid=dict(
num_samples=10,
img_scale=(256, 128),
img_norm_cfg=None,
match_score_thr=2.0,
match_iou_thr=0.2),
momentums=None,
num_frames_retain=10))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=100,
warmup_ratio=1.0 / 100,
step=[3])
# runtime settings
total_epochs = 4
evaluation = dict(metric=['bbox', 'track'], interval=1)
search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML']
|
import keras.src.backend
from keras.src import tree
from keras.src.layers.layer import Layer
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
from keras.src.utils import tracking
class TFDataLayer(Layer):
"""Layer that can safely used in a tf.data pipeline.
The `call()` method must solely rely on `self.backend` ops.
Only supports a single input tensor argument.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.backend = backend_utils.DynamicBackend()
self._allow_non_tensor_positional_args = True
def __call__(self, inputs, **kwargs):
if backend_utils.in_tf_graph() and not isinstance(
inputs, keras.KerasTensor
):
# We're in a TF graph, e.g. a tf.data pipeline.
self.backend.set_backend("tensorflow")
inputs = tree.map_structure(
lambda x: self.backend.convert_to_tensor(
x, dtype=self.compute_dtype
),
inputs,
)
switch_convert_input_args = False
if self._convert_input_args:
self._convert_input_args = False
switch_convert_input_args = True
try:
outputs = super().__call__(inputs, **kwargs)
finally:
self.backend.reset()
if switch_convert_input_args:
self._convert_input_args = True
return outputs
return super().__call__(inputs, **kwargs)
@tracking.no_automatic_dependency_tracking
def _get_seed_generator(self, backend=None):
if backend is None or backend == keras.backend.backend():
return self.generator
if not hasattr(self, "_backend_generators"):
self._backend_generators = {}
if backend in self._backend_generators:
return self._backend_generators[backend]
seed_generator = SeedGenerator(self.seed, backend=self.backend)
self._backend_generators[backend] = seed_generator
return seed_generator
def convert_weight(self, weight):
"""Convert the weight if it is from the a different backend."""
if self.backend.name == keras.backend.backend():
return weight
else:
weight = keras.ops.convert_to_numpy(weight)
return self.backend.convert_to_tensor(weight)
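For context, a minimal sketch (not part of the module above) of what this class enables: a preprocessing layer built on TFDataLayer, such as keras.layers.RandomFlip, can be mapped over a tf.data pipeline even when Keras itself runs on a different backend:

import tensorflow as tf
import keras

# RandomFlip is one of the preprocessing layers implemented on top of TFDataLayer.
augment = keras.layers.RandomFlip("horizontal", seed=0)

images = tf.random.uniform((8, 32, 32, 3))
ds = tf.data.Dataset.from_tensor_slices(images).batch(4).map(augment)
for batch in ds.take(1):
    print(batch.shape)  # (4, 32, 32, 3)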
|
import keras.src.backend
from keras.src import tree
from keras.src.layers.layer import Layer
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
from keras.src.utils import tracking
class TFDataLayer(Layer):
"""Layer that can safely used in a tf.data pipeline.
The `call()` method must solely rely on `self.backend` ops.
Only supports a single input tensor argument.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.backend = backend_utils.DynamicBackend()
self._allow_non_tensor_positional_args = True
def __call__(self, inputs, **kwargs):
if backend_utils.in_tf_graph() and not isinstance(
inputs, keras.KerasTensor
):
# We're in a TF graph, e.g. a tf.data pipeline.
self.backend.set_backend("tensorflow")
inputs = tree.map_structure(
lambda x: self.backend.convert_to_tensor(
x, dtype=self.compute_dtype
),
inputs,
)
switch_convert_input_args = False
if self._convert_input_args:
self._convert_input_args = False
switch_convert_input_args = True
try:
outputs = super().__call__(inputs, **kwargs)
finally:
self.backend.reset()
if switch_convert_input_args:
self._convert_input_args = True
return outputs
return super().__call__(inputs, **kwargs)
@tracking.no_automatic_dependency_tracking
def _get_seed_generator(self, backend=None):
if backend is None or backend == keras.backend.backend():
return self.generator
if not hasattr(self, "_backend_generators"):
self._backend_generators = {}
if backend in self._backend_generators:
return self._backend_generators[backend]
seed_generator = SeedGenerator(self.seed, backend=self.backend)
self._backend_generators[backend] = seed_generator
return seed_generator
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch.nn as nn
from torch.optim import SGD
from mmengine.hooks import RuntimeInfoHook
from mmengine.logging import MessageHub
from mmengine.optim import OptimWrapper, OptimWrapperDict
class TestRuntimeInfoHook(TestCase):
def test_before_train(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train')
runner = Mock()
runner.epoch = 7
runner.iter = 71
runner.max_epochs = 4
runner.max_iters = 40
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train(runner)
self.assertEqual(message_hub.get_info('epoch'), 7)
self.assertEqual(message_hub.get_info('iter'), 71)
self.assertEqual(message_hub.get_info('max_epochs'), 4)
self.assertEqual(message_hub.get_info('max_iters'), 40)
def test_before_train_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_epoch')
runner = Mock()
runner.epoch = 9
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_epoch(runner)
self.assertEqual(message_hub.get_info('epoch'), 9)
def test_before_train_iter(self):
model = nn.Linear(1, 1)
optim1 = SGD(model.parameters(), lr=0.01)
optim2 = SGD(model.parameters(), lr=0.02)
optim_wrapper1 = OptimWrapper(optim1)
optim_wrapper2 = OptimWrapper(optim2)
optim_wrapper_dict = OptimWrapperDict(
key1=optim_wrapper1, key2=optim_wrapper2)
# single optimizer
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
runner.optim_wrapper = optim_wrapper1
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(message_hub.get_scalar('train/lr').current(), 0.01)
with self.assertRaisesRegex(AssertionError,
'runner.optim_wrapper.get_lr()'):
runner.optim_wrapper = Mock()
runner.optim_wrapper.get_lr = Mock(return_value='error type')
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
# multiple optimizers
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
optimizer1 = Mock()
optimizer1.param_groups = [{'lr': 0.01}]
optimizer2 = Mock()
optimizer2.param_groups = [{'lr': 0.02}]
runner.message_hub = message_hub
runner.optim_wrapper = optim_wrapper_dict
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(
message_hub.get_scalar('train/key1.lr').current(), 0.01)
self.assertEqual(
message_hub.get_scalar('train/key2.lr').current(), 0.02)
def test_after_train_iter(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_train_iter')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_train_iter(
runner, batch_idx=2, data_batch=None, outputs={'loss_cls': 1.111})
self.assertEqual(
message_hub.get_scalar('train/loss_cls').current(), 1.111)
def test_after_val_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_val_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_val_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('val/acc').current(), 0.8)
def test_after_test_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_test_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_test_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('test/acc').current(), 0.8)
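For context, RuntimeInfoHook is normally registered for you by the Runner; a hedged sketch of the equivalent explicit default_hooks entry in an mmengine config (the other entries shown are the usual defaults):

default_hooks = dict(
    runtime_info=dict(type='RuntimeInfoHook'),
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=1),
    sampler_seed=dict(type='DistSamplerSeedHook'))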
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch.nn as nn
from torch.optim import SGD
from mmengine.hooks import RuntimeInfoHook
from mmengine.logging import MessageHub
from mmengine.optim import OptimWrapper, OptimWrapperDict
class TestRuntimeInfoHook(TestCase):
def test_before_run(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_run')
runner = Mock()
runner.epoch = 3
runner.iter = 30
runner.max_epochs = 4
runner.max_iters = 40
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_run(runner)
self.assertEqual(message_hub.get_info('epoch'), 3)
self.assertEqual(message_hub.get_info('iter'), 30)
self.assertEqual(message_hub.get_info('max_epochs'), 4)
self.assertEqual(message_hub.get_info('max_iters'), 40)
def test_before_train(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train')
runner = Mock()
runner.epoch = 7
runner.iter = 71
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train(runner)
self.assertEqual(message_hub.get_info('epoch'), 7)
self.assertEqual(message_hub.get_info('iter'), 71)
def test_before_train_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_epoch')
runner = Mock()
runner.epoch = 9
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_epoch(runner)
self.assertEqual(message_hub.get_info('epoch'), 9)
def test_before_train_iter(self):
model = nn.Linear(1, 1)
optim1 = SGD(model.parameters(), lr=0.01)
optim2 = SGD(model.parameters(), lr=0.02)
optim_wrapper1 = OptimWrapper(optim1)
optim_wrapper2 = OptimWrapper(optim2)
optim_wrapper_dict = OptimWrapperDict(
key1=optim_wrapper1, key2=optim_wrapper2)
# single optimizer
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
runner.optim_wrapper = optim_wrapper1
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(message_hub.get_scalar('train/lr').current(), 0.01)
with self.assertRaisesRegex(AssertionError,
'runner.optim_wrapper.get_lr()'):
runner.optim_wrapper = Mock()
runner.optim_wrapper.get_lr = Mock(return_value='error type')
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
# multiple optimizers
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
optimizer1 = Mock()
optimizer1.param_groups = [{'lr': 0.01}]
optimizer2 = Mock()
optimizer2.param_groups = [{'lr': 0.02}]
runner.message_hub = message_hub
runner.optim_wrapper = optim_wrapper_dict
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(
message_hub.get_scalar('train/key1.lr').current(), 0.01)
self.assertEqual(
message_hub.get_scalar('train/key2.lr').current(), 0.02)
def test_after_train_iter(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_train_iter')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_train_iter(
runner, batch_idx=2, data_batch=None, outputs={'loss_cls': 1.111})
self.assertEqual(
message_hub.get_scalar('train/loss_cls').current(), 1.111)
def test_after_val_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_val_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_val_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('val/acc').current(), 0.8)
def test_after_test_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_test_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_test_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('test/acc').current(), 0.8)
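For context, a minimal sketch (the instance name is arbitrary) of reading back the values that RuntimeInfoHook records, using the same MessageHub API exercised by the tests above:

from mmengine.logging import MessageHub

hub = MessageHub.get_instance('demo_hub')
hub.update_info('iter', 10)
hub.update_scalar('train/lr', 0.01)
print(hub.get_info('iter'))                  # 10
print(hub.get_scalar('train/lr').current())  # 0.01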
|