input | output
---|---
# Copyright (c) OpenMMLab. All rights reserved.
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, get_deprecated_model_names,
get_external_models, get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .runner import Runner
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint import (CheckpointLoader, get_deprecated_model_names,
get_external_models, get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
__all__ = [
'load_state_dict', 'get_torchvision_models', 'get_external_models',
'get_mmcls_models', 'get_deprecated_model_names', 'CheckpointLoader',
'load_checkpoint', 'weights_to_cpu', 'get_state_dict', 'save_checkpoint'
]
|
"""Embeddings."""
from typing import Any, List
from unittest.mock import patch
from llama_index.core.base.embeddings.base import SimilarityMode, mean_agg
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
def mock_get_text_embedding(text: str) -> List[float]:
"""Mock get text embedding."""
# assume dimensions are 5
if text == "Hello world.":
return [1, 0, 0, 0, 0]
elif text == "This is a test.":
return [0, 1, 0, 0, 0]
elif text == "This is another test.":
return [0, 0, 1, 0, 0]
elif text == "This is a test v2.":
return [0, 0, 0, 1, 0]
elif text == "This is a test v3.":
return [0, 0, 0, 0, 1]
elif text == "This is bar test.":
return [0, 0, 1, 0, 0]
elif text == "Hello world backup.":
# this is used when "Hello world." is deleted.
return [1, 0, 0, 0, 0]
else:
raise ValueError("Invalid text for `mock_get_text_embedding`.")
def mock_get_text_embeddings(texts: List[str]) -> List[List[float]]:
"""Mock get text embeddings."""
return [mock_get_text_embedding(text) for text in texts]
@patch.object(MockEmbedding, "_get_text_embedding", side_effect=mock_get_text_embedding)
@patch.object(
MockEmbedding, "_get_text_embeddings", side_effect=mock_get_text_embeddings
)
def test_get_text_embeddings(
_mock_get_text_embeddings: Any, _mock_get_text_embedding: Any
) -> None:
"""Test get queued text embeddings."""
embed_model = MockEmbedding(embed_dim=8)
texts_to_embed = []
for i in range(8):
texts_to_embed.append("Hello world.")
for i in range(8):
texts_to_embed.append("This is a test.")
for i in range(4):
texts_to_embed.append("This is another test.")
for i in range(4):
texts_to_embed.append("This is a test v2.")
result_embeddings = embed_model.get_text_embedding_batch(texts_to_embed)
for i in range(8):
assert result_embeddings[i] == [1, 0, 0, 0, 0]
for i in range(8, 16):
assert result_embeddings[i] == [0, 1, 0, 0, 0]
for i in range(16, 20):
assert result_embeddings[i] == [0, 0, 1, 0, 0]
for i in range(20, 24):
assert result_embeddings[i] == [0, 0, 0, 1, 0]
def test_embedding_similarity() -> None:
"""Test embedding similarity."""
embed_model = MockEmbedding(embed_dim=3)
text_embedding = [3.0, 4.0, 0.0]
query_embedding = [0.0, 1.0, 0.0]
cosine = embed_model.similarity(query_embedding, text_embedding)
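    # cosine = dot(query, text) / (|query| * |text|) = 4 / (1 * 5) = 0.8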
assert cosine == 0.8
def test_embedding_similarity_euclidean() -> None:
embed_model = MockEmbedding(embed_dim=2)
query_embedding = [1.0, 0.0]
text1_embedding = [0.0, 1.0] # further from query_embedding distance=1.414
text2_embedding = [1.0, 1.0] # closer to query_embedding distance=1.0
euclidean_similarity1 = embed_model.similarity(
query_embedding, text1_embedding, mode=SimilarityMode.EUCLIDEAN
)
euclidean_similarity2 = embed_model.similarity(
query_embedding, text2_embedding, mode=SimilarityMode.EUCLIDEAN
)
assert euclidean_similarity1 < euclidean_similarity2
def test_mean_agg() -> None:
"""Test mean aggregation for embeddings."""
embedding_0 = [3.0, 4.0, 0.0]
embedding_1 = [0.0, 1.0, 0.0]
output = mean_agg([embedding_0, embedding_1])
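    # element-wise mean: [(3 + 0) / 2, (4 + 1) / 2, (0 + 0) / 2] = [1.5, 2.5, 0.0]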
assert output == [1.5, 2.5, 0.0]
|
"""Embeddings."""
from typing import Any, List
from unittest.mock import patch
from llama_index.core.base.embeddings.base import SimilarityMode, mean_agg
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
def mock_get_text_embedding(text: str) -> List[float]:
"""Mock get text embedding."""
# assume dimensions are 5
if text == "Hello world.":
return [1, 0, 0, 0, 0]
elif text == "This is a test.":
return [0, 1, 0, 0, 0]
elif text == "This is another test.":
return [0, 0, 1, 0, 0]
elif text == "This is a test v2.":
return [0, 0, 0, 1, 0]
elif text == "This is a test v3.":
return [0, 0, 0, 0, 1]
elif text == "This is bar test.":
return [0, 0, 1, 0, 0]
elif text == "Hello world backup.":
# this is used when "Hello world." is deleted.
return [1, 0, 0, 0, 0]
else:
raise ValueError("Invalid text for `mock_get_text_embedding`.")
def mock_get_text_embeddings(texts: List[str]) -> List[List[float]]:
"""Mock get text embeddings."""
return [mock_get_text_embedding(text) for text in texts]
@patch.object(MockEmbedding, "_get_text_embedding", side_effect=mock_get_text_embedding)
@patch.object(
MockEmbedding, "_get_text_embeddings", side_effect=mock_get_text_embeddings
)
def test_get_text_embeddings(
_mock_get_text_embeddings: Any, _mock_get_text_embedding: Any
) -> None:
"""Test get queued text embeddings."""
embed_model = MockEmbedding(embed_dim=8)
texts_to_embed = []
for i in range(8):
texts_to_embed.append("Hello world.")
for i in range(8):
texts_to_embed.append("This is a test.")
for i in range(4):
texts_to_embed.append("This is another test.")
for i in range(4):
texts_to_embed.append("This is a test v2.")
result_embeddings = embed_model.get_text_embedding_batch(texts_to_embed)
for i in range(8):
assert result_embeddings[i] == [1, 0, 0, 0, 0]
for i in range(8, 16):
assert result_embeddings[i] == [0, 1, 0, 0, 0]
for i in range(16, 20):
assert result_embeddings[i] == [0, 0, 1, 0, 0]
for i in range(20, 24):
assert result_embeddings[i] == [0, 0, 0, 1, 0]
def test_embedding_similarity() -> None:
"""Test embedding similarity."""
embed_model = MockEmbedding(embed_dim=3)
text_embedding = [3.0, 4.0, 0.0]
query_embedding = [0.0, 1.0, 0.0]
cosine = embed_model.similarity(query_embedding, text_embedding)
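    # cosine = dot(query, text) / (|query| * |text|) = 4 / (1 * 5) = 0.8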
assert cosine == 0.8
def test_embedding_similarity_euclidean() -> None:
embed_model = MockEmbedding(embed_dim=2)
query_embedding = [1.0, 0.0]
text1_embedding = [0.0, 1.0] # further from query_embedding distance=1.414
text2_embedding = [1.0, 1.0] # closer to query_embedding distance=1.0
euclidean_similarity1 = embed_model.similarity(
query_embedding, text1_embedding, mode=SimilarityMode.EUCLIDEAN
)
euclidean_similarity2 = embed_model.similarity(
query_embedding, text2_embedding, mode=SimilarityMode.EUCLIDEAN
)
assert euclidean_similarity1 < euclidean_similarity2
def test_mean_agg() -> None:
"""Test mean aggregation for embeddings."""
embedding_0 = [3.0, 4.0, 0.0]
embedding_1 = [0.0, 1.0, 0.0]
output = mean_agg([embedding_0, embedding_1])
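    # element-wise mean: [(3 + 0) / 2, (4 + 1) / 2, (0 + 0) / 2] = [1.5, 2.5, 0.0]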
assert output == [1.5, 2.5, 0.0]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
MODELS.register_module('Linear', module=nn.Linear)
@MODELS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
        eps (float, optional): A small value added to the divisor to
            keep numerical stability. Default to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):
super(NormedLinear, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x):
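        # L2-normalize the weight rows and the input features, scale the input
        # by the temperature, then apply the linear map: with power=1 the output
        # is a temperature-scaled cosine similarity (plus bias) between x and
        # each class weight vector.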
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@MODELS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
        eps (float, optional): A small value added to the divisor to
            keep numerical stability. Default to 1e-6.
        norm_over_kernel (bool, optional): Whether to normalize the weight
            over the whole kernel instead of per input channel.
            Default to False.
"""
def __init__(self,
*args,
tempearture=20,
power=1.0,
eps=1e-6,
norm_over_kernel=False,
**kwargs):
super(NormedConv2d, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x):
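        # Normalize the conv weight either per input channel (dim=1) or over the
        # whole flattened kernel, normalize and temperature-scale the input, then
        # run the standard convolution with the normalized weight.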
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
            # Compare versions numerically; a plain string comparison
            # misorders versions such as '1.10' vs '1.8'.
            if tuple(int(p) for p in torch.__version__.split('+')[0].split('.')[:2]) >= (1, 8):
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import CONV_LAYERS
from .builder import LINEAR_LAYERS
@LINEAR_LAYERS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
        eps (float, optional): A small value added to the divisor to
            keep numerical stability. Default to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):
super(NormedLinear, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x):
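        # L2-normalize the weight rows and the input features, scale the input
        # by the temperature, then apply the linear map: with power=1 the output
        # is a temperature-scaled cosine similarity (plus bias) between x and
        # each class weight vector.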
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@CONV_LAYERS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
        power (float, optional): Power term. Default to 1.0.
        eps (float, optional): A small value added to the divisor to
            keep numerical stability. Default to 1e-6.
        norm_over_kernel (bool, optional): Whether to normalize the weight
            over the whole kernel instead of per input channel.
            Default to False.
"""
def __init__(self,
*args,
tempearture=20,
power=1.0,
eps=1e-6,
norm_over_kernel=False,
**kwargs):
super(NormedConv2d, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x):
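        # Normalize the conv weight either per input channel (dim=1) or over the
        # whole flattened kernel, normalize and temperature-scale the input, then
        # run the standard convolution with the normalized weight.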
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
            # Compare versions numerically; a plain string comparison
            # misorders versions such as '1.10' vs '1.8'.
            if tuple(int(p) for p in torch.__version__.split('+')[0].split('.')[:2]) >= (1, 8):
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist import (all_gather_object, all_reduce, all_gather, all_reduce_dict,
collect_results, gather, broadcast, gather_object,
sync_random_seed, broadcast_object_list,
collect_results_cpu, collect_results_gpu)
from .utils import (get_dist_info, init_dist, init_local_group, get_backend,
get_world_size, get_rank, get_local_size, get_local_rank,
is_main_process, master_only, barrier, get_local_group,
is_distributed, get_default_group, get_data_device,
get_comm_device, cast_data_device)
__all__ = [
'all_gather_object', 'all_reduce', 'all_gather', 'all_reduce_dict',
'collect_results', 'collect_results_cpu', 'collect_results_gpu', 'gather',
'broadcast', 'gather_object', 'sync_random_seed', 'broadcast_object_list',
'get_dist_info', 'init_dist', 'init_local_group', 'get_backend',
'get_world_size', 'get_rank', 'get_local_size', 'get_local_group',
'get_local_rank', 'is_main_process', 'master_only', 'barrier',
'is_distributed', 'get_default_group', 'get_data_device',
'get_comm_device', 'cast_data_device'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist import (all_gather_object, all_reduce, all_gather, all_reduce_dict,
collect_results, gather, broadcast, gather_object,
sync_random_seed, broadcast_object_list,
collect_results_cpu, collect_results_gpu)
from .utils import (get_dist_info, init_dist, init_local_group, get_backend,
get_world_size, get_rank, get_local_size, get_local_rank,
is_main_process, master_only, barrier, get_local_group,
is_distributed, get_default_group)
__all__ = [
'all_gather_object', 'all_reduce', 'all_gather', 'all_reduce_dict',
'collect_results', 'collect_results_cpu', 'collect_results_gpu', 'gather',
'broadcast', 'gather_object', 'sync_random_seed', 'broadcast_object_list',
'get_dist_info', 'init_dist', 'init_local_group', 'get_backend',
'get_world_size', 'get_rank', 'get_local_size', 'get_local_group',
'get_local_rank', 'is_main_process', 'master_only', 'barrier',
'is_distributed', 'get_default_group'
]
|
from docarray.dataclasses.types import dataclass, is_multimodal, field
|
from .types import dataclass, is_multimodal, field
|
import os
import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
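    # Poll `pg_isready` inside the postgres-test container until it reports
    # "accepting connections" or the retries are exhausted.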
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"exec",
"postgres-test",
"pg_isready",
"-U",
"postgres",
"-d",
"postgres",
],
check=True,
capture_output=True,
text=True,
)
if "accepting connections" in result.stdout:
print("PostgreSQL is ready.")
return True
except subprocess.CalledProcessError:
print(f"PostgreSQL is not ready yet. Retrying in {delay} seconds...")
time.sleep(delay)
print("Failed to connect to PostgreSQL.")
return False
def run_command(command, check=True):
try:
subprocess.run(command, check=check)
except subprocess.CalledProcessError as e:
print(f"Command failed: {e}")
sys.exit(1)
def test():
# Start PostgreSQL with Docker Compose
run_command(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"up",
"-d",
]
)
if not wait_for_postgres():
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(1)
# IMPORTANT: Set test database environment variables to prevent accidentally
# resetting the developer's local database.
#
# This script spins up a separate test database container (postgres-test) using
# docker-compose.test.yaml. We explicitly set DATABASE_URL and DIRECT_URL to point
# to this test database to ensure that:
# 1. The prisma migrate reset command only affects the test database
# 2. Tests run against the test database, not the developer's local database
# 3. Any database operations during testing are isolated from development data
#
# Without this, if a developer has DATABASE_URL set in their environment pointing
# to their development database, running tests would wipe their local data!
test_env = os.environ.copy()
# Use environment variables if set, otherwise use defaults that match docker-compose.test.yaml
db_user = os.getenv("DB_USER", "postgres")
db_pass = os.getenv("DB_PASS", "postgres")
db_name = os.getenv("DB_NAME", "postgres")
db_port = os.getenv("DB_PORT", "5432")
# Construct the test database URL - this ensures we're always pointing to the test container
test_env["DATABASE_URL"] = (
f"postgresql://{db_user}:{db_pass}@localhost:{db_port}/{db_name}"
)
test_env["DIRECT_URL"] = test_env["DATABASE_URL"]
test_env["DB_PORT"] = db_port
test_env["DB_NAME"] = db_name
test_env["DB_PASS"] = db_pass
test_env["DB_USER"] = db_user
# Run Prisma migrations with test database
# First, reset the database to ensure clean state for tests
# This is safe because we've explicitly set DATABASE_URL to the test database above
subprocess.run(
["prisma", "migrate", "reset", "--force", "--skip-seed"],
env=test_env,
check=False,
)
# Then apply migrations to get the test database schema up to date
subprocess.run(["prisma", "migrate", "deploy"], env=test_env, check=True)
# Run the tests with test database environment
# This ensures all database connections in the tests use the test database,
# not any database that might be configured in the developer's environment
result = subprocess.run(["pytest"] + sys.argv[1:], env=test_env, check=False)
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(result.returncode)
|
import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
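    # Poll `pg_isready` inside the postgres-test container until it reports
    # "accepting connections" or the retries are exhausted.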
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"exec",
"postgres-test",
"pg_isready",
"-U",
"postgres",
"-d",
"postgres",
],
check=True,
capture_output=True,
text=True,
)
if "accepting connections" in result.stdout:
print("PostgreSQL is ready.")
return True
except subprocess.CalledProcessError:
print(f"PostgreSQL is not ready yet. Retrying in {delay} seconds...")
time.sleep(delay)
print("Failed to connect to PostgreSQL.")
return False
def run_command(command, check=True):
try:
subprocess.run(command, check=check)
except subprocess.CalledProcessError as e:
print(f"Command failed: {e}")
sys.exit(1)
def test():
# Start PostgreSQL with Docker Compose
run_command(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"up",
"-d",
]
)
if not wait_for_postgres():
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(1)
# Run Prisma migrations
run_command(["prisma", "migrate", "dev"])
# Run the tests
result = subprocess.run(["pytest"] + sys.argv[1:], check=False)
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(result.returncode)
|
"""Init file of LlamaIndex."""
__version__ = "0.12.34.post1"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.33.post1"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
from typing import Callable, Dict, Any, List, Optional, Awaitable
from ag_ui.core import RunAgentInput
from fastapi import APIRouter
from fastapi.responses import StreamingResponse
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.workflow import Workflow
from llama_index.protocols.ag_ui.agent import AGUIChatWorkflow
from llama_index.protocols.ag_ui.events import (
TextMessageStartWorkflowEvent,
TextMessageContentWorkflowEvent,
TextMessageChunkWorkflowEvent,
TextMessageEndWorkflowEvent,
ToolCallStartWorkflowEvent,
ToolCallArgsWorkflowEvent,
ToolCallChunkWorkflowEvent,
ToolCallEndWorkflowEvent,
StateSnapshotWorkflowEvent,
StateDeltaWorkflowEvent,
MessagesSnapshotWorkflowEvent,
RunStartedWorkflowEvent,
RunFinishedWorkflowEvent,
RunErrorWorkflowEvent,
CustomWorkflowEvent,
)
from llama_index.protocols.ag_ui.utils import (
timestamp,
workflow_event_to_sse,
)
AG_UI_EVENTS = (
TextMessageStartWorkflowEvent,
TextMessageContentWorkflowEvent,
TextMessageEndWorkflowEvent,
ToolCallStartWorkflowEvent,
ToolCallArgsWorkflowEvent,
ToolCallEndWorkflowEvent,
StateSnapshotWorkflowEvent,
StateDeltaWorkflowEvent,
MessagesSnapshotWorkflowEvent,
TextMessageChunkWorkflowEvent,
ToolCallChunkWorkflowEvent,
CustomWorkflowEvent,
)
class AGUIWorkflowRouter:
def __init__(self, workflow_factory: Callable[[], Awaitable[Workflow]]):
self.workflow_factory = workflow_factory
self.router = APIRouter()
self.router.add_api_route("/run", self.run, methods=["POST"])
async def run(self, input: RunAgentInput):
workflow = await self.workflow_factory()
handler = workflow.run(
input_data=input,
)
async def stream_response():
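            # Emit RUN_STARTED, forward recognized AG-UI workflow events as SSE,
            # then snapshot the final state and emit RUN_FINISHED; on failure,
            # emit RUN_ERROR and cancel the run.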
try:
yield workflow_event_to_sse(
RunStartedWorkflowEvent(
timestamp=timestamp(),
thread_id=input.thread_id,
run_id=input.run_id,
)
)
async for ev in handler.stream_events():
if isinstance(ev, AG_UI_EVENTS):
yield workflow_event_to_sse(ev)
else:
print(f"Unhandled event: {type(ev)}")
# Finish the run
_ = await handler
# Update the state
state = await handler.ctx.store.get("state", default={})
yield workflow_event_to_sse(StateSnapshotWorkflowEvent(snapshot=state))
yield workflow_event_to_sse(
RunFinishedWorkflowEvent(
timestamp=timestamp(),
thread_id=input.thread_id,
run_id=input.run_id,
)
)
except Exception as e:
yield workflow_event_to_sse(
RunErrorWorkflowEvent(
timestamp=timestamp(),
message=str(e),
code=str(type(e)),
)
)
await handler.cancel_run()
raise
return StreamingResponse(stream_response(), media_type="text/event-stream")
def get_default_workflow_factory(
llm: Optional[FunctionCallingLLM] = None,
frontend_tools: Optional[List[str]] = None,
backend_tools: Optional[List[str]] = None,
initial_state: Optional[Dict[str, Any]] = None,
system_prompt: Optional[str] = None,
timeout: Optional[float] = 120,
) -> Callable[[], Awaitable[Workflow]]:
async def workflow_factory():
return AGUIChatWorkflow(
llm=llm,
frontend_tools=frontend_tools,
backend_tools=backend_tools,
initial_state=initial_state,
system_prompt=system_prompt,
timeout=timeout,
)
return workflow_factory
def get_ag_ui_workflow_router(
workflow_factory: Optional[Callable[[], Awaitable[Workflow]]] = None,
llm: Optional[FunctionCallingLLM] = None,
frontend_tools: Optional[List[str]] = None,
backend_tools: Optional[List[str]] = None,
initial_state: Optional[Dict[str, Any]] = None,
system_prompt: Optional[str] = None,
timeout: Optional[float] = 120,
) -> APIRouter:
workflow_factory = workflow_factory or get_default_workflow_factory(
llm, frontend_tools, backend_tools, initial_state, system_prompt, timeout
)
return AGUIWorkflowRouter(workflow_factory).router
|
from typing import Callable, Dict, Any, List, Optional, Awaitable
from ag_ui.core import RunAgentInput
from fastapi import APIRouter
from fastapi.responses import StreamingResponse
from llama_index.core.llms.function_calling import FunctionCallingLLM
from llama_index.core.workflow import Workflow
from llama_index.protocols.ag_ui.agent import AGUIChatWorkflow
from llama_index.protocols.ag_ui.events import (
TextMessageStartWorkflowEvent,
TextMessageContentWorkflowEvent,
TextMessageChunkWorkflowEvent,
TextMessageEndWorkflowEvent,
ToolCallStartWorkflowEvent,
ToolCallArgsWorkflowEvent,
ToolCallChunkWorkflowEvent,
ToolCallEndWorkflowEvent,
StateSnapshotWorkflowEvent,
StateDeltaWorkflowEvent,
MessagesSnapshotWorkflowEvent,
RunStartedWorkflowEvent,
RunFinishedWorkflowEvent,
RunErrorWorkflowEvent,
CustomWorkflowEvent,
)
from llama_index.protocols.ag_ui.utils import (
timestamp,
workflow_event_to_sse,
)
AG_UI_EVENTS = (
TextMessageStartWorkflowEvent,
TextMessageContentWorkflowEvent,
TextMessageEndWorkflowEvent,
ToolCallStartWorkflowEvent,
ToolCallArgsWorkflowEvent,
ToolCallEndWorkflowEvent,
StateSnapshotWorkflowEvent,
StateDeltaWorkflowEvent,
MessagesSnapshotWorkflowEvent,
TextMessageChunkWorkflowEvent,
ToolCallChunkWorkflowEvent,
CustomWorkflowEvent,
)
class AGUIWorkflowRouter:
def __init__(self, workflow_factory: Callable[[], Awaitable[Workflow]]):
self.workflow_factory = workflow_factory
self.router = APIRouter()
self.router.add_api_route("/run", self.run, methods=["POST"])
async def run(self, input: RunAgentInput):
workflow = await self.workflow_factory()
handler = workflow.run(
input_data=input,
)
async def stream_response():
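            # Emit RUN_STARTED, forward recognized AG-UI workflow events as SSE,
            # then snapshot the final state and emit RUN_FINISHED; on failure,
            # emit RUN_ERROR and cancel the run.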
try:
yield workflow_event_to_sse(
RunStartedWorkflowEvent(
timestamp=timestamp(),
thread_id=input.thread_id,
run_id=input.run_id,
)
)
async for ev in handler.stream_events():
if isinstance(ev, AG_UI_EVENTS):
yield workflow_event_to_sse(ev)
else:
print(f"Unhandled event: {type(ev)}")
# Finish the run
_ = await handler
# Update the state
state = await handler.ctx.get("state", default={})
yield workflow_event_to_sse(StateSnapshotWorkflowEvent(snapshot=state))
yield workflow_event_to_sse(
RunFinishedWorkflowEvent(
timestamp=timestamp(),
thread_id=input.thread_id,
run_id=input.run_id,
)
)
except Exception as e:
yield workflow_event_to_sse(
RunErrorWorkflowEvent(
timestamp=timestamp(),
message=str(e),
code=str(type(e)),
)
)
await handler.cancel_run()
raise
return StreamingResponse(stream_response(), media_type="text/event-stream")
def get_default_workflow_factory(
llm: Optional[FunctionCallingLLM] = None,
frontend_tools: Optional[List[str]] = None,
backend_tools: Optional[List[str]] = None,
initial_state: Optional[Dict[str, Any]] = None,
system_prompt: Optional[str] = None,
timeout: Optional[float] = 120,
) -> Callable[[], Awaitable[Workflow]]:
async def workflow_factory():
return AGUIChatWorkflow(
llm=llm,
frontend_tools=frontend_tools,
backend_tools=backend_tools,
initial_state=initial_state,
system_prompt=system_prompt,
timeout=timeout,
)
return workflow_factory
def get_ag_ui_workflow_router(
workflow_factory: Optional[Callable[[], Awaitable[Workflow]]] = None,
llm: Optional[FunctionCallingLLM] = None,
frontend_tools: Optional[List[str]] = None,
backend_tools: Optional[List[str]] = None,
initial_state: Optional[Dict[str, Any]] = None,
system_prompt: Optional[str] = None,
timeout: Optional[float] = 120,
) -> APIRouter:
workflow_factory = workflow_factory or get_default_workflow_factory(
llm, frontend_tools, backend_tools, initial_state, system_prompt, timeout
)
return AGUIWorkflowRouter(workflow_factory).router
|
from ._tts import (
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
Tacotron2TTSBundle,
)
from ._wav2vec2.impl import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
Wav2Vec2ASRBundle,
Wav2Vec2Bundle,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
]
|
from ._tts import (
Tacotron2TTSBundle,
TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH,
TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH,
TACOTRON2_WAVERNN_CHAR_LJSPEECH,
TACOTRON2_WAVERNN_PHONE_LJSPEECH,
)
from ._wav2vec2.impl import (
Wav2Vec2Bundle,
Wav2Vec2ASRBundle,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_XLSR53,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
)
from .rnnt_pipeline import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
__all__ = [
"Wav2Vec2Bundle",
"Wav2Vec2ASRBundle",
"WAV2VEC2_BASE",
"WAV2VEC2_LARGE",
"WAV2VEC2_LARGE_LV60K",
"WAV2VEC2_ASR_BASE_10M",
"WAV2VEC2_ASR_BASE_100H",
"WAV2VEC2_ASR_BASE_960H",
"WAV2VEC2_ASR_LARGE_10M",
"WAV2VEC2_ASR_LARGE_100H",
"WAV2VEC2_ASR_LARGE_960H",
"WAV2VEC2_ASR_LARGE_LV60K_10M",
"WAV2VEC2_ASR_LARGE_LV60K_100H",
"WAV2VEC2_ASR_LARGE_LV60K_960H",
"WAV2VEC2_XLSR53",
"VOXPOPULI_ASR_BASE_10K_EN",
"VOXPOPULI_ASR_BASE_10K_ES",
"VOXPOPULI_ASR_BASE_10K_DE",
"VOXPOPULI_ASR_BASE_10K_FR",
"VOXPOPULI_ASR_BASE_10K_IT",
"HUBERT_BASE",
"HUBERT_LARGE",
"HUBERT_XLARGE",
"HUBERT_ASR_LARGE",
"HUBERT_ASR_XLARGE",
"Tacotron2TTSBundle",
"TACOTRON2_GRIFFINLIM_CHAR_LJSPEECH",
"TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH",
"TACOTRON2_WAVERNN_CHAR_LJSPEECH",
"TACOTRON2_WAVERNN_PHONE_LJSPEECH",
"RNNTBundle",
"EMFORMER_RNNT_BASE_LIBRISPEECH",
]
|
import os
import pytest
from docarray import Document
from jina import Executor, Flow, requests
class MyExec(Executor):
@requests
def foo(self, docs, **kwargs):
pass
@pytest.fixture
def cert_prefix():
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/../../../unit/serve/runtimes/gateway/grpc/cert/'
@pytest.fixture
def cert_pem(cert_prefix):
"""This is the cert entry of a self-signed local cert"""
return cert_prefix + '/server.crt'
@pytest.fixture
def key_pem(cert_prefix):
"""This is the key entry of a self-signed local cert"""
return cert_prefix + '/server.key'
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('tls', [True, False])
@pytest.mark.parametrize(
'uses',
['jinaai+sandbox://jina-ai/DummyHubExecutor'],
)
def test_deployment_protocol(protocol, tls, cert_pem, key_pem, uses):
cert = cert_pem if tls else None
key = key_pem if tls else None
f = (
Flow(protocol=protocol)
.config_gateway(ssl_certfile=cert, ssl_keyfile=key)
.add(uses=MyExec)
.add(uses=uses)
)
with f:
for node, v in f._deployment_nodes.items():
p = v.protocol.lower()
if node == 'gateway':
assert p == protocol + ('s' if tls else '')
elif node == 'executor0':
assert p == 'grpc'
elif node == 'executor1':
assert p == 'grpcs'
|
import os
import pytest
from docarray import Document
from jina import Executor, Flow, requests
class MyExec(Executor):
@requests
def foo(self, docs, **kwargs):
pass
@pytest.fixture
def cert_prefix():
cur_dir = os.path.dirname(os.path.abspath(__file__))
return f'{cur_dir}/../../../unit/serve/runtimes/gateway/grpc/cert/'
@pytest.fixture
def cert_pem(cert_prefix):
"""This is the cert entry of a self-signed local cert"""
return cert_prefix + '/server.crt'
@pytest.fixture
def key_pem(cert_prefix):
"""This is the key entry of a self-signed local cert"""
return cert_prefix + '/server.key'
@pytest.mark.parametrize('protocol', ['http', 'websocket', 'grpc'])
@pytest.mark.parametrize('tls', [True, False])
@pytest.mark.parametrize(
'uses',
['jinaai+sandbox://jina-ai/DummyHubExecutor'],
)
def test_deployment_protocol(protocol, tls, cert_pem, key_pem, uses):
cert = cert_pem if tls else None
key = key_pem if tls else None
f = (
Flow(protocol=protocol, ssl_certfile=cert, ssl_keyfile=key)
.add(uses=MyExec)
.add(uses=uses)
)
with f:
for node, v in f._deployment_nodes.items():
p = v.protocol.lower()
if node == 'gateway':
assert p == protocol + ('s' if tls else '')
elif node == 'executor0':
assert p == 'grpc'
elif node == 'executor1':
assert p == 'grpcs'
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that updates the metadata of the Diffusers library in the repository `huggingface/diffusers-metadata`.
Usage for an update (as used by the GitHub action `update_metadata`):
```bash
python utils/update_metadata.py
```
Script modified from:
https://github.com/huggingface/transformers/blob/main/utils/update_metadata.py
"""
import argparse
import os
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from diffusers.pipelines.auto_pipeline import (
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
AUTO_INPAINT_PIPELINES_MAPPING,
AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
)
PIPELINE_TAG_JSON = "pipeline_tags.json"
def get_supported_pipeline_table() -> dict:
"""
Generates a dictionary containing the supported auto classes for each pipeline type,
using the content of the auto modules.
"""
# All supported pipelines for automatic mapping.
all_supported_pipeline_classes = [
(class_name.__name__, "text-to-image", "AutoPipelineForText2Image")
for _, class_name in AUTO_TEXT2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForImage2Image")
for _, class_name in AUTO_IMAGE2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForInpainting")
for _, class_name in AUTO_INPAINT_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes = list(set(all_supported_pipeline_classes))
all_supported_pipeline_classes.sort(key=lambda x: x[0])
data = {}
data["pipeline_class"] = [sample[0] for sample in all_supported_pipeline_classes]
data["pipeline_tag"] = [sample[1] for sample in all_supported_pipeline_classes]
data["auto_class"] = [sample[2] for sample in all_supported_pipeline_classes]
return data
def update_metadata(commit_sha: str):
"""
Update the metadata for the Diffusers repo in `huggingface/diffusers-metadata`.
Args:
commit_sha (`str`): The commit SHA on Diffusers corresponding to this update.
"""
pipelines_table = get_supported_pipeline_table()
pipelines_table = pd.DataFrame(pipelines_table)
pipelines_dataset = Dataset.from_pandas(pipelines_table)
hub_pipeline_tags_json = hf_hub_download(
repo_id="huggingface/diffusers-metadata",
filename=PIPELINE_TAG_JSON,
repo_type="dataset",
)
with open(hub_pipeline_tags_json) as f:
hub_pipeline_tags_json = f.read()
with tempfile.TemporaryDirectory() as tmp_dir:
pipelines_dataset.to_json(os.path.join(tmp_dir, PIPELINE_TAG_JSON))
with open(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) as f:
pipeline_tags_json = f.read()
hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json
if hub_pipeline_tags_equal:
print("No updates, not pushing the metadata files.")
return
if commit_sha is not None:
commit_message = (
f"Update with commit {commit_sha}\n\nSee: "
f"https://github.com/huggingface/diffusers/commit/{commit_sha}"
)
else:
commit_message = "Update"
upload_folder(
repo_id="huggingface/diffusers-metadata",
folder_path=tmp_dir,
repo_type="dataset",
commit_message=commit_message,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--commit_sha", default=None, type=str, help="The sha of the commit going with this update.")
args = parser.parse_args()
update_metadata(args.commit_sha)
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that updates the metadata of the Diffusers library in the repository `huggingface/diffusers-metadata`.
Usage for an update (as used by the GitHub action `update_metadata`):
```bash
python utils/update_metadata.py
```
Script modified from:
https://github.com/huggingface/transformers/blob/main/utils/update_metadata.py
"""
import argparse
import os
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from diffusers.pipelines.auto_pipeline import (
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
AUTO_INPAINT_PIPELINES_MAPPING,
AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
)
PIPELINE_TAG_JSON = "pipeline_tags.json"
def get_supported_pipeline_table() -> dict:
"""
Generates a dictionary containing the supported auto classes for each pipeline type,
using the content of the auto modules.
"""
# All supported pipelines for automatic mapping.
all_supported_pipeline_classes = [
(class_name.__name__, "text-to-image", "AutoPipelineForText2Image")
for _, class_name in AUTO_TEXT2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForImage2Image")
for _, class_name in AUTO_IMAGE2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForInpainting")
for _, class_name in AUTO_INPAINT_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes = list(set(all_supported_pipeline_classes))
all_supported_pipeline_classes.sort(key=lambda x: x[0])
data = {}
data["pipeline_class"] = [sample[0] for sample in all_supported_pipeline_classes]
data["pipeline_tag"] = [sample[1] for sample in all_supported_pipeline_classes]
data["auto_class"] = [sample[2] for sample in all_supported_pipeline_classes]
return data
def update_metadata(commit_sha: str):
"""
Update the metadata for the Diffusers repo in `huggingface/diffusers-metadata`.
Args:
commit_sha (`str`): The commit SHA on Diffusers corresponding to this update.
"""
pipelines_table = get_supported_pipeline_table()
pipelines_table = pd.DataFrame(pipelines_table)
pipelines_dataset = Dataset.from_pandas(pipelines_table)
hub_pipeline_tags_json = hf_hub_download(
repo_id="huggingface/diffusers-metadata",
filename=PIPELINE_TAG_JSON,
repo_type="dataset",
)
with open(hub_pipeline_tags_json) as f:
hub_pipeline_tags_json = f.read()
with tempfile.TemporaryDirectory() as tmp_dir:
pipelines_dataset.to_json(os.path.join(tmp_dir, PIPELINE_TAG_JSON))
with open(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) as f:
pipeline_tags_json = f.read()
hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json
if hub_pipeline_tags_equal:
print("No updates, not pushing the metadata files.")
return
if commit_sha is not None:
commit_message = (
f"Update with commit {commit_sha}\n\nSee: "
f"https://github.com/huggingface/diffusers/commit/{commit_sha}"
)
else:
commit_message = "Update"
upload_folder(
repo_id="huggingface/diffusers-metadata",
folder_path=tmp_dir,
repo_type="dataset",
commit_message=commit_message,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--commit_sha", default=None, type=str, help="The sha of the commit going with this update.")
args = parser.parse_args()
update_metadata(args.commit_sha)
|
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
w, h = 224, 224
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_to_bytes(width=w, height=h)
assert isinstance(_bytes, bytes)
img = PIL.Image.frombytes(mode='1', size=(w, h), data=_bytes)
assert isinstance(img, PIL.Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
w, h = 224, 224
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_to_bytes(width=w, height=h)
assert isinstance(_bytes, bytes)
img = PIL.Image.frombytes(mode='1', size=(w, h), data=_bytes)
assert isinstance(img, PIL.Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
"""Testing code shared by other tests."""
# pylint: disable=invalid-name
import collections
import importlib.util
import json
import os
import tempfile
from typing import Any, Callable, Dict, Type
import numpy as np
import xgboost as xgb
from xgboost._typing import ArrayLike
def validate_leaf_output(leaf: np.ndarray, num_parallel_tree: int) -> None:
"""Validate output for predict leaf tests."""
for i in range(leaf.shape[0]): # n_samples
for j in range(leaf.shape[1]): # n_rounds
for k in range(leaf.shape[2]): # n_classes
tree_group = leaf[i, j, k, :]
assert tree_group.shape[0] == num_parallel_tree
# No sampling, all trees within forest are the same
assert np.all(tree_group == tree_group[0])
def validate_data_initialization(
dmatrix: Type, model: Type[xgb.XGBModel], X: ArrayLike, y: ArrayLike
) -> None:
"""Assert that we don't create duplicated DMatrix."""
old_init = dmatrix.__init__
count = [0]
def new_init(self: Any, **kwargs: Any) -> Callable:
count[0] += 1
return old_init(self, **kwargs)
dmatrix.__init__ = new_init
model(n_estimators=1).fit(X, y, eval_set=[(X, y)])
assert count[0] == 1
count[0] = 0 # only 1 DMatrix is created.
y_copy = y.copy()
model(n_estimators=1).fit(X, y, eval_set=[(X, y_copy)])
assert count[0] == 2 # a different Python object is considered different
dmatrix.__init__ = old_init
# pylint: disable=too-many-arguments,too-many-locals
def get_feature_weights(
*,
X: ArrayLike,
y: ArrayLike,
fw: np.ndarray,
parser_path: str,
tree_method: str,
model: Type[xgb.XGBModel] = xgb.XGBRegressor,
) -> np.ndarray:
"""Get feature weights using the demo parser."""
with tempfile.TemporaryDirectory() as tmpdir:
colsample_bynode = 0.5
reg = model(
tree_method=tree_method,
colsample_bynode=colsample_bynode,
feature_weights=fw,
)
reg.fit(X, y)
model_path = os.path.join(tmpdir, "model.json")
reg.save_model(model_path)
with open(model_path, "r", encoding="utf-8") as fd:
model = json.load(fd)
spec = importlib.util.spec_from_file_location("JsonParser", parser_path)
assert spec is not None
jsonm = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(jsonm)
model = jsonm.Model(model)
splits: Dict[int, int] = {}
total_nodes = 0
for tree in model.trees:
n_nodes = len(tree.nodes)
total_nodes += n_nodes
for n in range(n_nodes):
if tree.is_leaf(n):
continue
if splits.get(tree.split_index(n), None) is None:
splits[tree.split_index(n)] = 1
else:
splits[tree.split_index(n)] += 1
od = collections.OrderedDict(sorted(splits.items()))
tuples = list(od.items())
k, v = list(zip(*tuples))
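        # Fit a straight line to (feature index, split count) to summarise how split
        # frequency changes with the feature index;
        # e.g. np.polyfit([0, 1, 2], [10, 6, 2], deg=1) yields a slope of -4.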
w = np.polyfit(k, v, deg=1)
return w
|
"""Testing code shared by other tests."""
# pylint: disable=invalid-name
import collections
import importlib.util
import json
import os
import tempfile
from typing import Any, Callable, Dict, Type
import numpy as np
import xgboost as xgb
from xgboost._typing import ArrayLike
def validate_leaf_output(leaf: np.ndarray, num_parallel_tree: int) -> None:
"""Validate output for predict leaf tests."""
for i in range(leaf.shape[0]): # n_samples
for j in range(leaf.shape[1]): # n_rounds
for k in range(leaf.shape[2]): # n_classes
tree_group = leaf[i, j, k, :]
assert tree_group.shape[0] == num_parallel_tree
# No sampling, all trees within forest are the same
assert np.all(tree_group == tree_group[0])
def validate_data_initialization(
dmatrix: Type, model: Type[xgb.XGBModel], X: ArrayLike, y: ArrayLike
) -> None:
"""Assert that we don't create duplicated DMatrix."""
old_init = dmatrix.__init__
count = [0]
def new_init(self: Any, **kwargs: Any) -> Callable:
count[0] += 1
return old_init(self, **kwargs)
dmatrix.__init__ = new_init
model(n_estimators=1).fit(X, y, eval_set=[(X, y)])
assert count[0] == 1
count[0] = 0 # only 1 DMatrix is created.
y_copy = y.copy()
model(n_estimators=1).fit(X, y, eval_set=[(X, y_copy)])
assert count[0] == 2 # a different Python object is considered different
dmatrix.__init__ = old_init
# pylint: disable=too-many-arguments,too-many-locals
def get_feature_weights(
*,
X: ArrayLike,
y: ArrayLike,
fw: np.ndarray,
parser_path: str,
tree_method: str,
model: Type[xgb.XGBModel] = xgb.XGBRegressor,
) -> np.ndarray:
"""Get feature weights using the demo parser."""
with tempfile.TemporaryDirectory() as tmpdir:
colsample_bynode = 0.5
reg = model(tree_method=tree_method, colsample_bynode=colsample_bynode)
reg.fit(X, y, feature_weights=fw)
model_path = os.path.join(tmpdir, "model.json")
reg.save_model(model_path)
with open(model_path, "r", encoding="utf-8") as fd:
model = json.load(fd)
spec = importlib.util.spec_from_file_location("JsonParser", parser_path)
assert spec is not None
jsonm = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(jsonm)
model = jsonm.Model(model)
splits: Dict[int, int] = {}
total_nodes = 0
for tree in model.trees:
n_nodes = len(tree.nodes)
total_nodes += n_nodes
for n in range(n_nodes):
if tree.is_leaf(n):
continue
if splits.get(tree.split_index(n), None) is None:
splits[tree.split_index(n)] = 1
else:
splits[tree.split_index(n)] += 1
od = collections.OrderedDict(sorted(splits.items()))
tuples = list(od.items())
k, v = list(zip(*tuples))
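        # Fit a straight line to (feature index, split count) to summarise how split
        # frequency changes with the feature index;
        # e.g. np.polyfit([0, 1, 2], [10, 6, 2], deg=1) yields a slope of -4.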
w = np.polyfit(k, v, deg=1)
return w
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"Z\n\x0cKeyValuePair\x12#\n\x03key\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\";\n\x10GenericDictValue\x12\'\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x16.docarray.KeyValuePair\"\xcb\x03\n\tNodeProto\x12\x0e\n\x04text\x18\x01 \x01(\tH\x00\x12\x11\n\x07integer\x18\x02 \x01(\x05H\x00\x12\x0f\n\x05\x66loat\x18\x03 \x01(\x01H\x00\x12\x11\n\x07\x62oolean\x18\x04 \x01(\x08H\x00\x12\x0e\n\x04\x62lob\x18\x05 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12+\n\x08\x64ocument\x18\x07 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12\x36\n\x0e\x64ocument_array\x18\x08 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12*\n\x04list\x18\t \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12)\n\x03set\x18\n \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12+\n\x05tuple\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12\'\n\x04\x64ict\x18\x0c \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x12\x0e\n\x04type\x18\r \x01(\tH\x01\x42\t\n\x07\x63ontentB\x0f\n\rdocarray_type\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x88\x03\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12H\n\x0b\x64oc_columns\x18\x02 \x03(\x0b\x32\x33.docarray.DocumentArrayStackedProto.DocColumnsEntry\x12N\n\x0etensor_columns\x18\x03 \x03(\x0b\x32\x36.docarray.DocumentArrayStackedProto.TensorColumnsEntry\x1aV\n\x0f\x44ocColumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.docarray.DocumentArrayStackedProto:\x02\x38\x01\x1aL\n\x12TensorColumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProto:\x02\x38\x01\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_DOCCOLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_DOCCOLUMNSENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_TENSORCOLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_TENSORCOLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_KEYVALUEPAIR._serialized_start = 230
_KEYVALUEPAIR._serialized_end = 320
_GENERICDICTVALUE._serialized_start = 322
_GENERICDICTVALUE._serialized_end = 381
_NODEPROTO._serialized_start = 384
_NODEPROTO._serialized_end = 843
_DOCUMENTPROTO._serialized_start = 846
_DOCUMENTPROTO._serialized_end = 976
_DOCUMENTPROTO_DATAENTRY._serialized_start = 912
_DOCUMENTPROTO_DATAENTRY._serialized_end = 976
_DOCUMENTARRAYPROTO._serialized_start = 978
_DOCUMENTARRAYPROTO._serialized_end = 1037
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 1040
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1432
_DOCUMENTARRAYSTACKEDPROTO_DOCCOLUMNSENTRY._serialized_start = 1268
_DOCUMENTARRAYSTACKEDPROTO_DOCCOLUMNSENTRY._serialized_end = 1354
_DOCUMENTARRAYSTACKEDPROTO_TENSORCOLUMNSENTRY._serialized_start = 1356
_DOCUMENTARRAYSTACKEDPROTO_TENSORCOLUMNSENTRY._serialized_end = 1432
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"Z\n\x0cKeyValuePair\x12#\n\x03key\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\";\n\x10GenericDictValue\x12\'\n\x07\x65ntries\x18\x01 \x03(\x0b\x32\x16.docarray.KeyValuePair\"\xcb\x03\n\tNodeProto\x12\x0e\n\x04text\x18\x01 \x01(\tH\x00\x12\x11\n\x07integer\x18\x02 \x01(\x05H\x00\x12\x0f\n\x05\x66loat\x18\x03 \x01(\x01H\x00\x12\x11\n\x07\x62oolean\x18\x04 \x01(\x08H\x00\x12\x0e\n\x04\x62lob\x18\x05 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12+\n\x08\x64ocument\x18\x07 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12\x36\n\x0e\x64ocument_array\x18\x08 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12*\n\x04list\x18\t \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12)\n\x03set\x18\n \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12+\n\x05tuple\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12\'\n\x04\x64ict\x18\x0c \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x12\x0e\n\x04type\x18\r \x01(\tH\x01\x42\t\n\x07\x63ontentB\x0f\n\rdocarray_type\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xd6\x01\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_KEYVALUEPAIR._serialized_start = 230
_KEYVALUEPAIR._serialized_end = 320
_GENERICDICTVALUE._serialized_start = 322
_GENERICDICTVALUE._serialized_end = 381
_NODEPROTO._serialized_start = 384
_NODEPROTO._serialized_end = 843
_DOCUMENTPROTO._serialized_start = 846
_DOCUMENTPROTO._serialized_end = 976
_DOCUMENTPROTO_DATAENTRY._serialized_start = 912
_DOCUMENTPROTO_DATAENTRY._serialized_end = 976
_DOCUMENTARRAYPROTO._serialized_start = 978
_DOCUMENTARRAYPROTO._serialized_end = 1037
_UNIONARRAYPROTO._serialized_start = 1040
_UNIONARRAYPROTO._serialized_end = 1174
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 1177
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1391
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 1318
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1391
# @@protoc_insertion_point(module_scope)
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
.. note::
The feature is only supported using the Python, R, and C packages. In addition, quantile
    crossing can happen due to limitations in the algorithm.
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generation process is from the
    # sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
y = expected_y + noise
    # Train on the 0.05, 0.5, and 0.95 quantiles. The model is similar to multi-class
    # and multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    # We will be using the `hist` tree method; QuantileDMatrix can be used to save
    # memory (which has nothing to do with quantile regression itself, see its
    # documentation for details).
# Do not use the `exact` tree method for quantile regression, otherwise the
# performance might drop.
Xy = xgb.QuantileDMatrix(X_train, y_train)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
# Train a mse model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
.. note::
The feature is only supported using the Python, R, and C packages. In addition, quantile
    crossing can happen due to limitations in the algorithm.
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generation process is from the
    # sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
y = expected_y + noise
    # Train on the 0.05, 0.5, and 0.95 quantiles. The model is similar to multi-class
    # and multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    # We will be using the `hist` tree method; QuantileDMatrix can be used to save
    # memory (which has nothing to do with quantile regression itself, see its
    # documentation for details).
# Do not use the `exact` tree method for quantile regression, otherwise the
# performance might drop.
Xy = xgb.QuantileDMatrix(X_train, y_train)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
# Train a mse model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
|
from langchain_core.messages import AIMessage, ToolCall, ToolMessage
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import (
parse_ai_message_to_openai_tool_action,
)
def test_calls_convert_agent_action_to_messages() -> None:
additional_kwargs1 = {
"tool_calls": [
{
"id": "call_abcd12345",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "add"},
"type": "function",
},
],
}
message1 = AIMessage(content="", additional_kwargs=additional_kwargs1)
actions1 = parse_ai_message_to_openai_tool_action(message1)
additional_kwargs2 = {
"tool_calls": [
{
"id": "call_abcd54321",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "subtract"},
"type": "function",
},
],
}
message2 = AIMessage(content="", additional_kwargs=additional_kwargs2)
actions2 = parse_ai_message_to_openai_tool_action(message2)
additional_kwargs3 = {
"tool_calls": [
{
"id": "call_abcd67890",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "multiply"},
"type": "function",
},
{
"id": "call_abcd09876",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "divide"},
"type": "function",
},
],
}
message3 = AIMessage(content="", additional_kwargs=additional_kwargs3)
actions3 = parse_ai_message_to_openai_tool_action(message3)
message4 = AIMessage(
content="",
tool_calls=[
ToolCall(name="exponentiate", args={"a": 3, "b": 5}, id="call_abc02468"),
],
)
actions4 = parse_ai_message_to_openai_tool_action(message4)
# for mypy
assert isinstance(actions1, list)
assert isinstance(actions2, list)
assert isinstance(actions3, list)
assert isinstance(actions4, list)
intermediate_steps = [
(actions1[0], "observation1"),
(actions2[0], "observation2"),
(actions3[0], "observation3"),
(actions3[1], "observation4"),
(actions4[0], "observation4"),
]
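    # Each AIMessage should appear once, followed by one ToolMessage per tool call
    # carrying the corresponding observation.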
expected_messages = [
message1,
ToolMessage(
tool_call_id="call_abcd12345",
content="observation1",
additional_kwargs={"name": "add"},
),
message2,
ToolMessage(
tool_call_id="call_abcd54321",
content="observation2",
additional_kwargs={"name": "subtract"},
),
message3,
ToolMessage(
tool_call_id="call_abcd67890",
content="observation3",
additional_kwargs={"name": "multiply"},
),
ToolMessage(
tool_call_id="call_abcd09876",
content="observation4",
additional_kwargs={"name": "divide"},
),
message4,
ToolMessage(
tool_call_id="call_abc02468",
content="observation4",
additional_kwargs={"name": "exponentiate"},
),
]
output = format_to_openai_tool_messages(intermediate_steps)
assert output == expected_messages
def test_handles_empty_input_list() -> None:
output = format_to_openai_tool_messages([])
assert output == []
|
from langchain_core.messages import AIMessage, ToolCall, ToolMessage
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import (
parse_ai_message_to_openai_tool_action,
)
def test_calls_convert_agent_action_to_messages() -> None:
additional_kwargs1 = {
"tool_calls": [
{
"id": "call_abcd12345",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "add"},
"type": "function",
}
],
}
message1 = AIMessage(content="", additional_kwargs=additional_kwargs1)
actions1 = parse_ai_message_to_openai_tool_action(message1)
additional_kwargs2 = {
"tool_calls": [
{
"id": "call_abcd54321",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "subtract"},
"type": "function",
}
],
}
message2 = AIMessage(content="", additional_kwargs=additional_kwargs2)
actions2 = parse_ai_message_to_openai_tool_action(message2)
additional_kwargs3 = {
"tool_calls": [
{
"id": "call_abcd67890",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "multiply"},
"type": "function",
},
{
"id": "call_abcd09876",
"function": {"arguments": '{"a": 3, "b": 5}', "name": "divide"},
"type": "function",
},
],
}
message3 = AIMessage(content="", additional_kwargs=additional_kwargs3)
actions3 = parse_ai_message_to_openai_tool_action(message3)
message4 = AIMessage(
content="",
tool_calls=[
ToolCall(name="exponentiate", args={"a": 3, "b": 5}, id="call_abc02468")
],
)
actions4 = parse_ai_message_to_openai_tool_action(message4)
# for mypy
assert isinstance(actions1, list)
assert isinstance(actions2, list)
assert isinstance(actions3, list)
assert isinstance(actions4, list)
intermediate_steps = [
(actions1[0], "observation1"),
(actions2[0], "observation2"),
(actions3[0], "observation3"),
(actions3[1], "observation4"),
(actions4[0], "observation4"),
]
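    # Each AIMessage should appear once, followed by one ToolMessage per tool call
    # carrying the corresponding observation.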
expected_messages = [
message1,
ToolMessage(
tool_call_id="call_abcd12345",
content="observation1",
additional_kwargs={"name": "add"},
),
message2,
ToolMessage(
tool_call_id="call_abcd54321",
content="observation2",
additional_kwargs={"name": "subtract"},
),
message3,
ToolMessage(
tool_call_id="call_abcd67890",
content="observation3",
additional_kwargs={"name": "multiply"},
),
ToolMessage(
tool_call_id="call_abcd09876",
content="observation4",
additional_kwargs={"name": "divide"},
),
message4,
ToolMessage(
tool_call_id="call_abc02468",
content="observation4",
additional_kwargs={"name": "exponentiate"},
),
]
output = format_to_openai_tool_messages(intermediate_steps)
assert output == expected_messages
def test_handles_empty_input_list() -> None:
output = format_to_openai_tool_messages([])
assert output == []
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.registry import MODELS
@MODELS.register_module()
class FeatureRelayHead(BaseModule):
"""Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
        in_channels (int, optional): number of input channels. Default: 1024.
        out_conv_channels (int, optional): number of output channels before
            classification layer. Default: 256.
roi_feat_size (int, optional): roi feat size at box head. Default: 7.
scale_factor (int, optional): scale factor to match roi feat size
at mask head. Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels=1024,
out_conv_channels=256,
roi_feat_size=7,
scale_factor=2,
init_cfg=dict(type='Kaiming', layer='Linear')):
super(FeatureRelayHead, self).__init__(init_cfg)
assert isinstance(roi_feat_size, int)
self.in_channels = in_channels
self.out_conv_channels = out_conv_channels
self.roi_feat_size = roi_feat_size
self.out_channels = (roi_feat_size**2) * out_conv_channels
self.scale_factor = scale_factor
self.fp16_enabled = False
self.fc = nn.Linear(self.in_channels, self.out_channels)
self.upsample = nn.Upsample(
scale_factor=scale_factor, mode='bilinear', align_corners=True)
@auto_fp16()
def forward(self, x):
"""Forward function."""
N, in_C = x.shape
if N > 0:
out_C = self.out_conv_channels
out_HW = self.roi_feat_size
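            # Project the RoI feature vector back to a (out_C, out_HW, out_HW) map
            # and upsample it to the mask head resolution.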
x = self.fc(x)
x = x.reshape(N, out_C, out_HW, out_HW)
x = self.upsample(x)
return x
return None
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.builder import HEADS
@HEADS.register_module()
class FeatureRelayHead(BaseModule):
"""Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
        in_channels (int, optional): number of input channels. Default: 1024.
        out_conv_channels (int, optional): number of output channels before
            classification layer. Default: 256.
roi_feat_size (int, optional): roi feat size at box head. Default: 7.
scale_factor (int, optional): scale factor to match roi feat size
at mask head. Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels=1024,
out_conv_channels=256,
roi_feat_size=7,
scale_factor=2,
init_cfg=dict(type='Kaiming', layer='Linear')):
super(FeatureRelayHead, self).__init__(init_cfg)
assert isinstance(roi_feat_size, int)
self.in_channels = in_channels
self.out_conv_channels = out_conv_channels
self.roi_feat_size = roi_feat_size
self.out_channels = (roi_feat_size**2) * out_conv_channels
self.scale_factor = scale_factor
self.fp16_enabled = False
self.fc = nn.Linear(self.in_channels, self.out_channels)
self.upsample = nn.Upsample(
scale_factor=scale_factor, mode='bilinear', align_corners=True)
@auto_fp16()
def forward(self, x):
"""Forward function."""
N, in_C = x.shape
if N > 0:
out_C = self.out_conv_channels
out_HW = self.roi_feat_size
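            # Project the RoI feature vector back to a (out_C, out_HW, out_HW) map
            # and upsample it to the mask head resolution.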
x = self.fc(x)
x = x.reshape(N, out_C, out_HW, out_HW)
x = self.upsample(x)
return x
return None
|
import asyncio
import copy
from typing import Any, List, TYPE_CHECKING
from jina.serve.runtimes.servers import BaseServer
if TYPE_CHECKING:
from jina.logging.logger import JinaLogger
class CompositeBaseServer(BaseServer):
"""Composite Base Server implementation from which u can inherit a specific custom composite one"""
servers: List['BaseServer']
logger: 'JinaLogger'
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._kwargs = kwargs
@property
def _server_kwargs(self):
ret = []
# ignore monitoring and tracing args since they are not copyable
ignored_attrs = [
'metrics_registry',
'tracer_provider',
'grpc_tracing_server_interceptors',
'aio_tracing_client_interceptors',
'tracing_client_interceptor',
]
for port, protocol in zip(self.ports, self.protocols):
# ignore monitoring and tracing args since they are not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ignored_attrs
)
runtime_args.port = port
runtime_args.protocol = protocol
server_kwargs = {
k: v for k, v in self._kwargs.items() if k != 'runtime_args'
}
server_kwargs['runtime_args'] = dict(vars(runtime_args))
server_kwargs['req_handler'] = self._request_handler
ret.append(server_kwargs)
return ret
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
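        # Pre-populate deepcopy's memo so each ignored attribute is replaced by None
        # instead of being copied, e.g. copy.deepcopy(obj, {id(obj.attr): None})
        # leaves `attr` as None in the copy.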
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
async def setup_server(self):
"""
setup servers inside CompositeServer
"""
self.logger.debug(f'Setting up Composite server')
tasks = []
for server in self.servers:
tasks.append(asyncio.create_task(server.setup_server()))
await asyncio.gather(*tasks)
self.logger.debug(f'Composite server setup successful')
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.logger.debug(f'Shutting down server')
await super().shutdown()
shutdown_tasks = []
for server in self.servers:
shutdown_tasks.append(asyncio.create_task(server.shutdown()))
await asyncio.gather(*shutdown_tasks)
self.logger.debug(f'Server shutdown finished')
async def run_server(self):
"""Run servers inside CompositeServer forever"""
run_server_tasks = []
for server in self.servers:
run_server_tasks.append(asyncio.create_task(server.run_server()))
await asyncio.gather(*run_server_tasks)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(server, 'should_exit', True) for server in self.servers
]
return all(should_exit_values)
class CompositeServer(CompositeBaseServer):
"""Composite Server implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.servers: List[BaseServer] = []
for server_kwargs in self._server_kwargs:
server_cls = _get_gateway_class(
server_kwargs['runtime_args']['protocol'],
works_as_load_balancer=self.works_as_load_balancer,
)
server = server_cls(**server_kwargs)
self.servers.append(server)
self.gateways = self.servers # for backwards compatibility
|
import asyncio
import copy
from typing import Any, List, TYPE_CHECKING
from jina.serve.runtimes.servers import BaseServer
if TYPE_CHECKING:
from jina.logging.logger import JinaLogger
class CompositeBaseServer(BaseServer):
"""Composite Base Server implementation from which u can inherit a specific custom composite one"""
servers: List['BaseServer']
logger: 'JinaLogger'
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self._kwargs = kwargs
@property
def _server_kwargs(self):
ret = []
# ignore monitoring and tracing args since they are not copyable
ignored_attrs = [
'metrics_registry',
'tracer_provider',
'grpc_tracing_server_interceptors',
'aio_tracing_client_interceptors',
'tracing_client_interceptor',
]
for port, protocol in zip(self.ports, self.protocols):
# ignore monitoring and tracing args since they are not copyable
runtime_args = self._deepcopy_with_ignore_attrs(
self.runtime_args, ignored_attrs
)
runtime_args.port = port
runtime_args.protocol = protocol
server_kwargs = {k: v for k, v in self._kwargs.items() if k != 'runtime_args'}
server_kwargs['runtime_args'] = dict(vars(runtime_args))
server_kwargs['req_handler'] = self._request_handler
ret.append(server_kwargs)
return ret
@staticmethod
def _deepcopy_with_ignore_attrs(obj: Any, ignore_attrs: List[str]) -> Any:
"""Deep copy an object and ignore some attributes
:param obj: the object to copy
:param ignore_attrs: the attributes to ignore
:return: the copied object
"""
memo = {}
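        # Pre-populate deepcopy's memo so each ignored attribute is replaced by None
        # instead of being copied, e.g. copy.deepcopy(obj, {id(obj.attr): None})
        # leaves `attr` as None in the copy.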
for k in ignore_attrs:
if hasattr(obj, k):
memo[id(getattr(obj, k))] = None # getattr(obj, k)
return copy.deepcopy(obj, memo)
async def setup_server(self):
"""
setup servers inside CompositeServer
"""
self.logger.debug(f'Setting up Composite server')
tasks = []
for server in self.servers:
tasks.append(asyncio.create_task(server.setup_server()))
await asyncio.gather(*tasks)
self.logger.debug(f'Composite server setup successful')
async def shutdown(self):
"""Free other resources allocated with the server, e.g, gateway object, ..."""
self.logger.debug(f'Shutting down server')
await super().shutdown()
shutdown_tasks = []
for server in self.servers:
shutdown_tasks.append(asyncio.create_task(server.shutdown()))
await asyncio.gather(*shutdown_tasks)
self.logger.debug(f'Server shutdown finished')
async def run_server(self):
"""Run servers inside CompositeServer forever"""
run_server_tasks = []
for server in self.servers:
run_server_tasks.append(asyncio.create_task(server.run_server()))
await asyncio.gather(*run_server_tasks)
@property
def _should_exit(self) -> bool:
should_exit_values = [
getattr(server, 'should_exit', True) for server in self.servers
]
return all(should_exit_values)
class CompositeServer(CompositeBaseServer):
"""Composite Server implementation"""
def __init__(
self,
**kwargs,
):
"""Initialize the gateway
:param kwargs: keyword args
"""
super().__init__(**kwargs)
from jina.parsers.helper import _get_gateway_class
self.servers: List[BaseServer] = []
for server_kwargs in self._server_kwargs:
server_cls = _get_gateway_class(server_kwargs['runtime_args']['protocol'],
works_as_load_balancer=self.works_as_load_balancer)
server = server_cls(**server_kwargs)
self.servers.append(server)
self.gateways = self.servers # for backwards compatibility
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'annotations/panoptic_train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['PQ'])
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadPanopticAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 4),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_train2017.json',
img_prefix=data_root + 'train2017/',
seg_prefix=data_root + 'annotations/panoptic_train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/panoptic_val2017.json',
img_prefix=data_root + 'val2017/',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric=['pq'])
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List
import torch
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.data import BaseDataElement
from mmengine.optim import OptimWrapper
from mmengine.registry import MODEL_WRAPPERS
from ..utils import detect_anomalous_params
@MODEL_WRAPPERS.register_module()
class MMDistributedDataParallel(DistributedDataParallel):
"""A distributed model wrapper used for training,testing and validation in
loop.
Different from DistributedDataParallel, MMDistributedDataParallel
implements three methods :meth:`train_step`, :meth:`val_step` and
:meth:`test_step`, which will be called by ``train_loop``, ``val_loop``
and ``test_loop``.
    - ``train_step``: Called by ``runner.train_loop``, and implements the
      default model forward, gradient back propagation and parameter updating
      logic. To take advantage of DistributedDataParallel's automatic gradient
      synchronization, ``train_step`` calls ``DistributedDataParallel.forward``
      to calculate the losses, and calls other methods of :obj:`BaseModel` to
      pre-process data and parse losses. Finally, it updates the model parameters
      with :obj:`OptimWrapper` and returns the loss dictionary used for logging.
    - ``val_step``: Called by ``runner.val_loop`` and gets the inference
      results. Since there is no gradient synchronization requirement,
      this procedure is equivalent to ``BaseModel.val_step``.
    - ``test_step``: Called by ``runner.test_loop``, equivalent to ``val_step``.
Args:
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with `loss` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Default: False.
*args: list arguments passed to ``DistributedDataParallel``
**kwargs: keyword arguments passed to ``DistributedDataParallel``.
    Note:
        If the model has multiple submodules and each submodule has
        separate optimization strategies,
        :class:`MMSeparateDistributedDataParallel` should be used to wrap
        the model.
    Note:
        If the model itself has a custom optimization strategy, rather than
        simply forwarding the model and updating its parameters, a custom
        model wrapper inheriting from ``MMDistributedDataParallel`` should be
        defined and the ``train_step`` method overridden.
"""
def __init__(self, detect_anomalous_params: bool = False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.detect_anomalous_params = detect_anomalous_params
def train_step(self, data: List[dict],
optim_wrapper: OptimWrapper) -> Dict[str, torch.Tensor]:
"""Interface for model forward, backward and parameters updating during
training process.
:meth:`train_step` will perform the following steps in order:
- If :attr:`module` defines the preprocess method,
call ``module.preprocess`` to pre-processing data.
- Call ``module.forward(**data)`` and get losses.
- Parse losses.
- Call ``optim_wrapper.optimizer_step`` to update parameters.
- Return log messages of losses.
Args:
data (List[dict]): Data sampled by dataloader.
optim_wrapper (OptimWrapper): A wrapper of optimizer to
update parameters.
Returns:
Dict[str, torch.Tensor]: A ``dict`` of tensor for logging.
"""
# Enable automatic mixed precision training context.
with optim_wrapper.optim_context(self):
batch_inputs, data_samples = self.module.data_preprocessor(
data, training=True)
losses = self(batch_inputs, data_samples, mode='loss')
if self.detect_anomalous_params:
detect_anomalous_params(losses, model=self)
parsed_loss, log_vars = self.module.parse_losses(losses)
optim_wrapper.update_params(parsed_loss)
return log_vars
def val_step(self, data: List[dict]) -> List[BaseDataElement]:
"""Gets the prediction of module during validation process.
Args:
data (List[dict]): Data sampled by dataloader.
Returns:
List[BaseDataElement] or dict: The predictions of given data.
"""
return self.module.val_step(data)
def test_step(self, data: List[dict]) -> List[BaseDataElement]:
"""Gets the predictions of module during testing process.
Args:
data: Data sampled by dataloader.
Returns:
List[BaseDataElement]: The predictions of given data.
"""
return self.module.test_step(data)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, List
import torch
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.data import BaseDataElement
from mmengine.optim import OptimWrapper
from mmengine.registry import MODEL_WRAPPERS
from ..utils import detect_anomalous_params
@MODEL_WRAPPERS.register_module()
class MMDistributedDataParallel(DistributedDataParallel):
"""A distributed model wrapper used for training,testing and validation in
loop.
Different from DistributedDataParallel, MMDistributedDataParallel
implements three methods :meth:`train_step`, :meth:`val_step` and
:meth:`test_step`, which will be called by ``train_loop``, ``val_loop``
and ``test_loop``.
    - ``train_step``: Called by ``runner.train_loop``, and implements the
      default model forward, gradient back propagation and parameter updating
      logic. To take advantage of DistributedDataParallel's automatic gradient
      synchronization, ``train_step`` calls ``DistributedDataParallel.forward``
      to calculate the losses, and calls other methods of :obj:`BaseModel` to
      pre-process data and parse losses. Finally, it updates the model parameters
      with :obj:`OptimWrapper` and returns the loss dictionary used for logging.
    - ``val_step``: Called by ``runner.val_loop`` and gets the inference
      results. Since there is no gradient synchronization requirement,
      this procedure is equivalent to ``BaseModel.val_step``.
    - ``test_step``: Called by ``runner.test_loop``, equivalent to ``val_step``.
Args:
detect_anomalous_params (bool): This option is only used for
debugging which will slow down the training speed.
Detect anomalous parameters that are not included in
the computational graph with `loss` as the root.
There are two cases
- Parameters were not used during
forward pass.
- Parameters were not used to produce
loss.
Default: False.
*args: list arguments passed to ``DistributedDataParallel``
**kwargs: keyword arguments passed to ``DistributedDataParallel``.
    Note:
        If the model has multiple submodules and each submodule has
        separate optimization strategies,
        :class:`MMSeparateDistributedDataParallel` should be used to wrap
        the model.
    Note:
        If the model itself has a custom optimization strategy, rather than
        simply forwarding the model and updating its parameters, a custom
        model wrapper inheriting from ``MMDistributedDataParallel`` should be
        defined and the ``train_step`` method overridden.
"""
def __init__(self, detect_anomalous_params: bool = False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.detect_anomalous_params = detect_anomalous_params
def train_step(self, data: List[dict],
optim_wrapper: OptimWrapper) -> Dict[str, torch.Tensor]:
"""Interface for model forward, backward and parameters updating during
training process.
:meth:`train_step` will perform the following steps in order:
- If :attr:`module` defines the preprocess method,
call ``module.preprocess`` to pre-processing data.
- Call ``module.forward(**data)`` and get losses.
- Parse losses.
- Call ``optim_wrapper.optimizer_step`` to update parameters.
- Return log messages of losses.
Args:
data (List[dict]): Data sampled by dataloader.
optim_wrapper (OptimWrapper): A wrapper of optimizer to
update parameters.
Returns:
Dict[str, torch.Tensor]: A ``dict`` of tensor for logging.
"""
# enable automatic mixed precision training context.
with optim_wrapper.precision_context():
batch_inputs, data_samples = self.module.data_preprocessor(
data, training=True)
losses = self(batch_inputs, data_samples, mode='loss')
if self.detect_anomalous_params:
detect_anomalous_params(losses, model=self)
parsed_loss, log_vars = self.module.parse_losses(losses)
optim_wrapper.update_params(parsed_loss)
return log_vars
def val_step(self, data: List[dict]) -> List[BaseDataElement]:
"""Gets the prediction of module during validation process.
Args:
data (List[dict]): Data sampled by dataloader.
Returns:
List[BaseDataElement] or dict: The predictions of given data.
"""
return self.module.val_step(data)
def test_step(self, data: List[dict]) -> List[BaseDataElement]:
"""Gets the predictions of module during testing process.
Args:
data: Data sampled by dataloader.
Returns:
List[BaseDataElement]: The predictions of given data.
"""
return self.module.test_step(data)
|
from typing import Union, Iterable, Dict
import warnings
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DAWs are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _upload_batch(self, docs: Iterable['Document']):
batch = []
failed_index = []
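        # Send documents to Elastic in chunks of `batch_size`; the refresh after each
        # flush makes the newly indexed documents immediately searchable.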
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
failed_index.extend(self._send_requests(batch))
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
failed_index.extend(self._send_requests(batch))
self._refresh(self._config.index_name)
return failed_index
def extend(self, docs: Iterable['Document']):
docs = list(docs)
failed_index = self._upload_batch(docs)
failed_ids = [index['_id'] for index in failed_index]
self._offset2ids.extend(
[
doc.id
for doc in docs
if (doc.id not in self._offset2ids.ids) and (doc.id not in failed_ids)
]
)
if len(failed_ids) > 0:
err_msg = f'fail to add Documents with ids: {failed_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
|
from typing import Union, Iterable, Dict
from ..base.seqlike import BaseSequenceLikeMixin
from .... import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DAWs are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _upload_batch(self, docs: Iterable['Document']):
batch = []
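        # Send documents to Elastic in chunks of `batch_size`; the refresh after each
        # flush makes the newly indexed documents immediately searchable.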
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
self._send_requests(batch)
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
self._send_requests(batch)
self._refresh(self._config.index_name)
def extend(self, docs: Iterable['Document']):
docs = list(docs)
self._upload_batch(docs)
self._offset2ids.extend(
[doc.id for doc in docs if doc.id not in self._offset2ids.ids]
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .package_utils import (call_command, check_install_package,
get_installed_path, is_installed)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .sync_bn import revert_sync_batchnorm
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs', 'is_installed', 'call_command', 'get_installed_path',
    'check_install_package', 'revert_sync_batchnorm'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .sync_bn import revert_sync_batchnorm
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs', 'revert_sync_batchnorm'
]
|
import os
from pathlib import Path
import cv2
import pytest
from jina import Document, DocumentArray, Executor
from yolov5_segmenter import YoloV5Segmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert segmenter.model_name_or_path == 'yolov5s'
@pytest.mark.parametrize(
'model_path',
[
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
'yolov5s',
'yolov5m',
],
)
def test_model_name_or_path(build_da, model_path):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) > 0
for chunk in doc.chunks:
assert chunk.blob.ndim == 3
assert chunk.tags.get('label')
assert chunk.tags.get('conf')
@pytest.mark.parametrize(
'model_path, expected_detections',
[
(
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
{'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3},
),
(
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
{'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3},
),
],
)
def test_n_detections(build_da, model_path, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
@pytest.mark.parametrize(
'confidence_threshold, expected_detections',
[
(0.3, {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.5, {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.8, {'bus.jpg': 3, 'zidane.jpg': 2, 'man.jpg': 0}),
],
)
def test_confidence_threshold(build_da, confidence_threshold, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(
model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'),
default_confidence_threshold=confidence_threshold,
)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
assert all(chunk.tags['conf'] >= confidence_threshold for chunk in doc.chunks)
def test_traversal_paths():
da = DocumentArray(
[
Document(
id='root',
blob=cv2.imread(os.path.join(cur_dir, '../data/img/man.jpg')),
),
]
)
segmenter = YoloV5Segmenter(
model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt')
)
segmenter.segment(da, parameters={})
# detects 2 persons and 1 cell phone
assert len(da[0].chunks) == 3
assert da[0].chunks[0].tags['label'] == 'person'
assert da[0].chunks[1].tags['label'] == 'person'
assert da[0].chunks[2].tags['label'] == 'cell phone'
segmenter.segment(da, parameters={'traversal_paths': ['c']})
# the first detected person spans the whole image, so segmenting the chunk produces 3 detections
person_chunk = da[0].chunks[0]
assert len(person_chunk.chunks) == 3
assert person_chunk.chunks[0].tags['label'] == 'person'
assert person_chunk.chunks[1].tags['label'] == 'person'
assert person_chunk.chunks[2].tags['label'] == 'cell phone'
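# The tests above rely on a `build_da` fixture defined elsewhere (the project's
# conftest.py). A purely illustrative sketch of such a fixture follows; the file
# names and directory layout are assumptions taken from the expected_detections
# dictionaries used in the tests.
@pytest.fixture
def build_da():
    def _build():
        return DocumentArray(
            [
                Document(
                    blob=cv2.imread(os.path.join(cur_dir, '../data/img', fname)),
                    tags={'filename': fname},
                )
                for fname in ('bus.jpg', 'zidane.jpg', 'man.jpg')
            ]
        )

    return _build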
|
import os
from pathlib import Path
import cv2
import pytest
from jina import Document, DocumentArray, Executor
from ...yolov5_segmenter import YoloV5Segmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
segmenter = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert segmenter.model_name_or_path == 'yolov5s'
@pytest.mark.parametrize(
'model_path',
[
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
'yolov5s',
'yolov5m',
],
)
def test_model_name_or_path(build_da, model_path):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) > 0
for chunk in doc.chunks:
assert chunk.blob.ndim == 3
assert chunk.tags.get('label')
assert chunk.tags.get('conf')
@pytest.mark.parametrize(
'model_path, expected_detections',
[
(
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
{'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3},
),
(
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
{'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3},
),
],
)
def test_n_detections(build_da, model_path, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
@pytest.mark.parametrize(
'confidence_threshold, expected_detections',
[
(0.3, {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.5, {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.8, {'bus.jpg': 3, 'zidane.jpg': 2, 'man.jpg': 0}),
],
)
def test_confidence_threshold(build_da, confidence_threshold, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(
model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'),
default_confidence_threshold=confidence_threshold,
)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
assert all(chunk.tags['conf'] >= confidence_threshold for chunk in doc.chunks)
def test_traversal_paths():
da = DocumentArray(
[
Document(
id='root',
blob=cv2.imread(os.path.join(cur_dir, '../data/img/man.jpg')),
),
]
)
segmenter = YoloV5Segmenter(
model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt')
)
segmenter.segment(da, parameters={})
# detects 2 persons and 1 cell phone
assert len(da[0].chunks) == 3
assert da[0].chunks[0].tags['label'] == 'person'
assert da[0].chunks[1].tags['label'] == 'person'
assert da[0].chunks[2].tags['label'] == 'cell phone'
segmenter.segment(da, parameters={'traversal_paths': ['c']})
# the first detected person spans the whole image, so segmenting the chunk produces 3 detections
person_chunk = da[0].chunks[0]
assert len(person_chunk.chunks) == 3
assert person_chunk.chunks[0].tags['label'] == 'person'
assert person_chunk.chunks[1].tags['label'] == 'person'
assert person_chunk.chunks[2].tags['label'] == 'cell phone'
|
from os.path import join
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset zip files from the internet and
puts it in root directory. If the zip files are already downloaded, they are not
downloaded again.
"""
folder = "omniglot-py"
download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
zips_md5 = {
"images_background": "68d2efa1b9178cc56df9314c21c6e718",
"images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
}
def __init__(
self,
root: str,
background: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
self.background = background
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters: List[str] = sum(
([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
)
self._character_images = [
[(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
for idx, character in enumerate(self._characters)
]
self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])
def __len__(self) -> int:
return len(self._flat_character_images)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode="r").convert("L")
if self.transform:
image = self.transform(image)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, character_class
def _check_integrity(self) -> bool:
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
return False
return True
def download(self) -> None:
if self._check_integrity():
print("Files already downloaded and verified")
return
filename = self._get_target_folder()
zip_filename = filename + ".zip"
url = self.download_url_prefix + "/" + zip_filename
download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
def _get_target_folder(self) -> str:
return "images_background" if self.background else "images_evaluation"
|
from os.path import join
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset zip files from the internet and
puts it in root directory. If the zip files are already downloaded, they are not
downloaded again.
"""
folder = "omniglot-py"
download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
zips_md5 = {
"images_background": "68d2efa1b9178cc56df9314c21c6e718",
"images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
}
def __init__(
self,
root: str,
background: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
self.background = background
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters: List[str] = sum(
([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
)
self._character_images = [
[(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
for idx, character in enumerate(self._characters)
]
self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])
def __len__(self) -> int:
return len(self._flat_character_images)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode="r").convert("L")
if self.transform:
image = self.transform(image)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, character_class
def _check_integrity(self) -> bool:
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
return False
return True
def download(self) -> None:
if self._check_integrity():
print("Files already downloaded and verified")
return
filename = self._get_target_folder()
zip_filename = filename + ".zip"
url = self.download_url_prefix + "/" + zip_filename
download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
def _get_target_folder(self) -> str:
return "images_background" if self.background else "images_evaluation"
|
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client
import pytest_asyncio
@pytest_asyncio.fixture
async def vector_store() -> QdrantVectorStore:
client = qdrant_client.QdrantClient(":memory:")
aclient = qdrant_client.AsyncQdrantClient(":memory:")
vector_store = QdrantVectorStore("test", client=client, aclient=aclient)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
embedding=[1.0, 0.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
embedding=[0.0, 1.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
embedding=[1.0, 1.0],
),
]
vector_store.add(nodes)
# in-memory client does not share data between instances
await vector_store.async_add(nodes)
return vector_store
@pytest_asyncio.fixture
async def hybrid_vector_store() -> QdrantVectorStore:
client = qdrant_client.QdrantClient(":memory:")
aclient = qdrant_client.AsyncQdrantClient(":memory:")
vector_store = QdrantVectorStore(
"test",
client=client,
aclient=aclient,
enable_hybrid=True,
fastembed_sparse_model="Qdrant/bm25",
)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
embedding=[1.0, 0.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
embedding=[0.0, 1.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
embedding=[1.0, 1.0],
),
]
vector_store.add(nodes)
# in-memory client does not share data between instances
await vector_store.async_add(nodes)
return vector_store
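# A hedged sketch of a test that would consume the fixtures above (illustrative
# only): VectorStoreQuery and the async aquery() call are the standard
# llama-index vector store interfaces.
import pytest
from llama_index.core.vector_stores.types import VectorStoreQuery


@pytest.mark.asyncio
async def test_simple_dense_query(vector_store: QdrantVectorStore) -> None:
    result = await vector_store.aquery(
        VectorStoreQuery(query_embedding=[1.0, 0.0], similarity_top_k=1)
    )
    # the query embedding [1.0, 0.0] is closest to the "test1" node
    assert result.nodes is not None
    assert result.nodes[0].text == "test1"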
|
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores.qdrant import QdrantVectorStore
import qdrant_client
import pytest_asyncio
@pytest_asyncio.fixture
async def vector_store() -> QdrantVectorStore:
client = qdrant_client.QdrantClient(":memory:")
aclient = qdrant_client.AsyncQdrantClient(":memory:")
vector_store = QdrantVectorStore("test", client=client, aclient=aclient)
nodes = [
TextNode(
text="test1",
id_="11111111-1111-1111-1111-111111111111",
embedding=[1.0, 0.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test2",
id_="22222222-2222-2222-2222-222222222222",
embedding=[0.0, 1.0],
relationships={NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test-0")},
),
TextNode(
text="test3",
id_="33333333-3333-3333-3333-333333333333",
embedding=[1.0, 1.0],
),
]
vector_store.add(nodes)
# in-memory client does not share data between instances
await vector_store.async_add(nodes)
return vector_store
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
from docarray.typing import AnyEmbedding, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDoc):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray of shape `(n_samples, 3)` and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain:
- a [`PointCloud3DUrl`][docarray.typing.url.PointCloud3DUrl] (`PointCloud3D.url`)
- a [`PointsAndColors`][docarray.documents.point_cloud.points_and_colors.PointsAndColors] object (`PointCloud3D.tensors`)
- an [`AnyEmbedding`](../../../../api_references/typing/tensor/embedding) (`PointCloud3D.embedding`)
- a `bytes` object (`PointCloud3D.bytes_`)
You can use this Document directly:
```python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
# model = MyEmbeddingModel()
# pc.embedding = model(pc.tensors.points)
```
You can extend this Document:
```python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
# model = MyEmbeddingModel()
# pc.embedding = model(pc.tensors.points)
# pc.second_embedding = model(pc.tensors.colors)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import PointCloud3D, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
point_cloud: PointCloud3D
text: TextDoc
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensors = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes_ = mmdoc.point_cloud.url.load_bytes()
```
You can display your point cloud from either its url, or its tensors:
```python
from docarray.documents import PointCloud3D
# display from url
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
# pc.url.display()
# display from tensors
pc.tensors = pc.url.load(samples=10000)
# pc.tensors.display()
```
"""
url: Optional[PointCloud3DUrl]
tensors: Optional[PointsAndColors]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensors=PointsAndColors(points=value))
return super().validate(value)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
from docarray.typing import AnyEmbedding, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDoc):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`),
a PointsAndColors object (`PointCloud3D.tensors`), and an AnyEmbedding
(`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
pc.second_embedding = model(pc.tensors.colors)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDoc):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensors = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes_ = mmdoc.point_cloud.url.load_bytes()
You can display your point cloud from either its url, or its tensors:
.. code-block:: python
from docarray.documents import PointCloud3D
# display from url
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.url.display()
# display from tensors
pc.tensors = pc.url.load(samples=10000)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
"""
url: Optional[PointCloud3DUrl]
tensors: Optional[PointsAndColors]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensors=PointsAndColors(points=value))
return super().validate(value)
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
if torch.cuda.is_available():
device_properties = torch.cuda.get_device_properties(0)
total_memory = device_properties.total_memory / (1024**3)
print(f"CUDA memory: {total_memory} GB")
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
|
"""
Tool implementations for the Riza (https://riza.io) code interpreter API.
Documentation: https://docs.riza.io
API keys: https://dashboard.riza.io
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, ToolException
from pydantic import BaseModel, Field
class ExecPythonInput(BaseModel):
code: str = Field(description="the Python code to execute")
class ExecPython(BaseTool):
"""Riza Code tool.
Setup:
Install ``langchain-community`` and ``rizaio`` and set environment variable ``RIZA_API_KEY``.
.. code-block:: bash
pip install -U langchain-community rizaio
export RIZA_API_KEY="your-api-key"
Instantiation:
.. code-block:: python
from langchain_community.tools.riza.command import ExecPython
tool = ExecPython()
Invocation with args:
.. code-block:: python
tool.invoke("x = 5; print(x)")
.. code-block:: python
'5\\n'
Invocation with ToolCall:
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
""" # noqa: E501
name: str = "riza_exec_python"
description: str = """Execute Python code to solve problems.
The Python runtime does not have filesystem access. You can use the httpx
or requests library to make HTTP requests. Always print output to stdout."""
args_schema: Type[BaseModel] = ExecPythonInput
handle_tool_error: bool = True
client: Any = None
runtime_revision_id: Optional[str] = None
def __init__(
self, runtime_revision_id: Optional[str] = None, **kwargs: Any
) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
self.runtime_revision_id = runtime_revision_id
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(
runtime_revision_id=self.runtime_revision_id, language="python", code=code
)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
class ExecJavaScriptInput(BaseModel):
code: str = Field(description="the JavaScript code to execute")
class ExecJavaScript(BaseTool):
"""A tool implementation to execute JavaScript via Riza's Code Interpreter API."""
name: str = "riza_exec_javascript"
description: str = """Execute JavaScript code to solve problems.
The JavaScript runtime does not have filesystem access, but can use fetch
to make HTTP requests and does include the global JSON object. Always print
output to stdout."""
args_schema: Type[BaseModel] = ExecJavaScriptInput
handle_tool_error: bool = True
client: Any = None
runtime_revision_id: Optional[str] = None
def __init__(
self, runtime_revision_id: Optional[str] = None, **kwargs: Any
) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
self.runtime_revision_id = runtime_revision_id
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(
runtime_revision_id=self.runtime_revision_id,
language="javascript",
code=code,
)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
|
"""
Tool implementations for the Riza (https://riza.io) code interpreter API.
Documentation: https://docs.riza.io
API keys: https://dashboard.riza.io
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, ToolException
from pydantic import BaseModel, Field
class ExecPythonInput(BaseModel):
code: str = Field(description="the Python code to execute")
class ExecPython(BaseTool): # type: ignore[override, override]
"""Riza Code tool.
Setup:
Install ``langchain-community`` and ``rizaio`` and set environment variable ``RIZA_API_KEY``.
.. code-block:: bash
pip install -U langchain-community rizaio
export RIZA_API_KEY="your-api-key"
Instantiation:
.. code-block:: python
from langchain_community.tools.riza.command import ExecPython
tool = ExecPython()
Invocation with args:
.. code-block:: python
tool.invoke("x = 5; print(x)")
.. code-block:: python
'5\\n'
Invocation with ToolCall:
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
""" # noqa: E501
name: str = "riza_exec_python"
description: str = """Execute Python code to solve problems.
The Python runtime does not have filesystem access. You can use the httpx
or requests library to make HTTP requests. Always print output to stdout."""
args_schema: Type[BaseModel] = ExecPythonInput
handle_tool_error: bool = True
client: Any = None
runtime_revision_id: Optional[str] = None
def __init__(
self, runtime_revision_id: Optional[str] = None, **kwargs: Any
) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
self.runtime_revision_id = runtime_revision_id
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(
runtime_revision_id=self.runtime_revision_id, language="python", code=code
)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
class ExecJavaScriptInput(BaseModel):
code: str = Field(description="the JavaScript code to execute")
class ExecJavaScript(BaseTool): # type: ignore[override, override]
"""A tool implementation to execute JavaScript via Riza's Code Interpreter API."""
name: str = "riza_exec_javascript"
description: str = """Execute JavaScript code to solve problems.
The JavaScript runtime does not have filesystem access, but can use fetch
to make HTTP requests and does include the global JSON object. Always print
output to stdout."""
args_schema: Type[BaseModel] = ExecJavaScriptInput
handle_tool_error: bool = True
client: Any = None
runtime_revision_id: Optional[str] = None
def __init__(
self, runtime_revision_id: Optional[str] = None, **kwargs: Any
) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
self.runtime_revision_id = runtime_revision_id
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(
runtime_revision_id=self.runtime_revision_id,
language="javascript",
code=code,
)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.parrots_wrapper import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (ExponentialMovingAverage, MomentumAnnealingEMA,
StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import detect_anomalous_params, merge_dict, stack_batch
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'StochasticWeightAverage',
'ExponentialMovingAverage', 'MomentumAnnealingEMA', 'BaseModel',
'BaseDataPreprocessor', 'ImgDataPreprocessor',
'MMSeparateDistributedDataParallel', 'BaseModule', 'stack_batch',
'merge_dict', 'detect_anomalous_params', 'ModuleList', 'ModuleDict',
'Sequential'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .averaged_model import (ExponentialMovingAverage, MomentumAnnealingEMA,
StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .utils import detect_anomalous_params, merge_dict, stack_batch
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'StochasticWeightAverage',
'ExponentialMovingAverage', 'MomentumAnnealingEMA', 'BaseModel',
'BaseDataPreprocessor', 'ImgDataPreprocessor',
'MMSeparateDistributedDataParallel', 'BaseModule', 'stack_batch',
'merge_dict', 'detect_anomalous_params', 'ModuleList', 'ModuleDict',
'Sequential'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor and micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
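# Quick illustration of the parser above (a sketch, safe to evaluate at import
# time): plain releases become integer tuples, while release candidates keep an
# 'rcN' suffix as the final element.
assert parse_version_info('0.10.1') == (0, 10, 1)
assert parse_version_info('1.0.0rc2') == (1, 0, 0, 'rc2')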
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_dc5.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer, util
class DistillKLDivLoss(nn.Module):
    """KL-divergence distillation loss: the student's (positive, negative) score
    distribution is pulled towards teacher scores provided as labels."""
def __init__(self, model: SentenceTransformer, similarity_fct=util.pairwise_dot_score) -> None:
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.loss_fct = nn.KLDivLoss(reduction="none")
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings, labels)
def compute_loss_from_embeddings(self, embeddings: list[Tensor], labels: Tensor) -> Tensor:
embeddings_query = embeddings[0]
embeddings_pos = embeddings[1]
embeddings_neg = embeddings[2]
# Compute student scores
student_scores_pos = self.similarity_fct(embeddings_query, embeddings_pos)
student_scores_neg = self.similarity_fct(embeddings_query, embeddings_neg)
# Pack into one tensor and apply log_softmax
student_scores = torch.stack([student_scores_pos, student_scores_neg], dim=1)
student_log_probs = torch.log_softmax(student_scores, dim=1)
# Labels contain teacher similarity scores (already computed before training)
# We expect labels to contain the teacher_pos_score and teacher_neg_score
teacher_pos_scores = labels[:, 0]
teacher_neg_scores = labels[:, 1]
teacher_scores = torch.stack([teacher_pos_scores, teacher_neg_scores], dim=1)
teacher_probs = torch.softmax(teacher_scores, dim=1)
# KL Divergence
loss = self.loss_fct(student_log_probs, teacher_probs).sum(dim=1).mean()
return loss
@property
def citation(self) -> str:
return """
@misc{lin2020distillingdenserepresentationsranking,
title={Distilling Dense Representations for Ranking using Tightly-Coupled Teachers},
author={Sheng-Chieh Lin and Jheng-Hong Yang and Jimmy Lin},
year={2020},
eprint={2010.11386},
archivePrefix={arXiv},
primaryClass={cs.IR},
url={https://arxiv.org/abs/2010.11386},
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer, util
class DistillKLDivLoss(nn.Module):
# TODO
def __init__(self, model: SentenceTransformer, similarity_fct=util.pairwise_dot_score) -> None:
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.loss_fct = nn.KLDivLoss(reduction="none")
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# sentence_features: query, positive passage, negative passage
reps = [self.model(feat)["sentence_embedding"] for feat in sentence_features]
embeddings_query = reps[0]
embeddings_pos = reps[1]
embeddings_neg = reps[2]
# Compute student scores
student_scores_pos = self.similarity_fct(embeddings_query, embeddings_pos)
student_scores_neg = self.similarity_fct(embeddings_query, embeddings_neg)
# Pack into one tensor and apply log_softmax
student_scores = torch.stack([student_scores_pos, student_scores_neg], dim=1)
student_log_probs = torch.log_softmax(student_scores, dim=1)
# Labels contain teacher similarity scores (already computed before training)
# We expect labels to contain the teacher_pos_score and teacher_neg_score
teacher_pos_scores = labels[:, 0]
teacher_neg_scores = labels[:, 1]
teacher_scores = torch.stack([teacher_pos_scores, teacher_neg_scores], dim=1)
teacher_probs = torch.softmax(teacher_scores, dim=1)
# KL Divergence
loss = self.loss_fct(student_log_probs, teacher_probs).sum(dim=1).mean()
return loss
@property
def citation(self) -> str:
return """
@misc{lin2020distillingdenserepresentationsranking,
title={Distilling Dense Representations for Ranking using Tightly-Coupled Teachers},
author={Sheng-Chieh Lin and Jheng-Hong Yang and Jimmy Lin},
year={2020},
eprint={2010.11386},
archivePrefix={arXiv},
primaryClass={cs.IR},
url={https://arxiv.org/abs/2010.11386},
}
"""
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[Union[List[Tuple[str, str]], Dict[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for key in columns.keys():
columns[key] = self._map_type(columns[key])
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
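# A hedged usage sketch: this mixin is normally reached through DocumentArray's
# storage API, with the AnnliteConfig fields supplied as the `config` dict
# (assumes the `annlite` package is installed).
if __name__ == "__main__":
    from docarray import Document, DocumentArray

    da = DocumentArray(storage='annlite', config={'n_dim': 3, 'metric': 'cosine'})
    da.extend([Document(embedding=np.random.rand(3)) for _ in range(10)])
    print(len(da))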
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for i in range(len(columns)):
columns[i] = (
columns[i][0],
self._map_type(columns[i][1]),
)
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
from langchain_core.prompts import PromptTemplate
prompt_template = """Write a concise summary of the following:
"{text}"
CONCISE SUMMARY:"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
|
# flake8: noqa
from langchain_core.prompts import PromptTemplate
prompt_template = """Write a concise summary of the following:
"{text}"
CONCISE SUMMARY:"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["text"])
|
from typing import List, cast
import numpy as np
from distributed import Client, Scheduler, Worker, get_worker
from distributed.utils_test import gen_cluster
import xgboost as xgb
from xgboost import testing as tm
from xgboost.compat import concat
def run_external_memory(worker_id: int, n_workers: int, comm_args: dict) -> None:
n_samples_per_batch = 32
n_features = 4
n_batches = 16
use_cupy = False
n_threads = get_worker().state.nthreads
with xgb.collective.CommunicatorContext(dmlc_communicator="rabit", **comm_args):
it = tm.IteratorForTest(
*tm.make_batches(
n_samples_per_batch,
n_features,
n_batches,
use_cupy,
random_state=worker_id,
),
cache="cache",
)
Xy = xgb.DMatrix(it, nthread=n_threads)
results: xgb.callback.TrainingCallback.EvalsLog = {}
booster = xgb.train(
{"tree_method": "hist", "nthread": n_threads},
Xy,
evals=[(Xy, "Train")],
num_boost_round=32,
evals_result=results,
)
assert tm.non_increasing(cast(List[float], results["Train"]["rmse"]))
lx, ly, lw = [], [], []
for i in range(n_workers):
x, y, w = tm.make_batches(
n_samples_per_batch,
n_features,
n_batches,
use_cupy,
random_state=i,
)
lx.extend(x)
ly.extend(y)
lw.extend(w)
X = concat(lx)
yconcat = concat(ly)
wconcat = concat(lw)
Xy = xgb.DMatrix(X, yconcat, weight=wconcat, nthread=n_threads)
results_local: xgb.callback.TrainingCallback.EvalsLog = {}
booster = xgb.train(
{"tree_method": "hist", "nthread": n_threads},
Xy,
evals=[(Xy, "Train")],
num_boost_round=32,
evals_result=results_local,
)
np.testing.assert_allclose(
results["Train"]["rmse"], results_local["Train"]["rmse"], rtol=1e-4
)
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker
) -> None:
workers = tm.get_client_workers(client)
args = await client.sync(
xgb.dask._get_rabit_args,
len(workers),
None,
client,
)
n_workers = len(workers)
futs = client.map(
run_external_memory, range(n_workers), n_workers=n_workers, comm_args=args
)
await client.gather(futs)
|
from typing import List, cast
import numpy as np
from distributed import Client, Scheduler, Worker, get_worker
from distributed.utils_test import gen_cluster
import xgboost as xgb
from xgboost import testing as tm
from xgboost.compat import concat
def run_external_memory(worker_id: int, n_workers: int, comm_args: dict) -> None:
n_samples_per_batch = 32
n_features = 4
n_batches = 16
use_cupy = False
n_threads = get_worker().state.nthreads
with xgb.collective.CommunicatorContext(dmlc_communicator="rabit", **comm_args):
it = tm.IteratorForTest(
*tm.make_batches(
n_samples_per_batch,
n_features,
n_batches,
use_cupy,
random_state=worker_id,
),
cache="cache",
)
Xy = xgb.DMatrix(it, nthread=n_threads)
results: xgb.callback.TrainingCallback.EvalsLog = {}
booster = xgb.train(
{"tree_method": "hist", "nthread": n_threads},
Xy,
evals=[(Xy, "Train")],
num_boost_round=32,
evals_result=results,
)
assert tm.non_increasing(cast(List[float], results["Train"]["rmse"]))
lx, ly, lw = [], [], []
for i in range(n_workers):
x, y, w = tm.make_batches(
n_samples_per_batch,
n_features,
n_batches,
use_cupy,
random_state=i,
)
lx.extend(x)
ly.extend(y)
lw.extend(w)
X = concat(lx)
yconcat = concat(ly)
wconcat = concat(lw)
Xy = xgb.DMatrix(X, yconcat, wconcat, nthread=n_threads)
results_local: xgb.callback.TrainingCallback.EvalsLog = {}
booster = xgb.train(
{"tree_method": "hist", "nthread": n_threads},
Xy,
evals=[(Xy, "Train")],
num_boost_round=32,
evals_result=results_local,
)
np.testing.assert_allclose(
results["Train"]["rmse"], results_local["Train"]["rmse"], rtol=1e-4
)
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker
) -> None:
workers = tm.get_client_workers(client)
args = await client.sync(
xgb.dask._get_rabit_args,
len(workers),
None,
client,
)
n_workers = len(workers)
futs = client.map(
run_external_memory, range(n_workers), n_workers=n_workers, comm_args=args
)
await client.gather(futs)
|
from typing import Dict
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self):
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path):
pass
@staticmethod
def load(input_path):
return Normalize()
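# A tiny standalone check of the layer above: after Normalize, every sentence
# embedding has unit L2 norm (uses torch only, no model required).
if __name__ == "__main__":
    import torch

    layer = Normalize()
    out = layer({"sentence_embedding": torch.randn(4, 8)})
    print(out["sentence_embedding"].norm(dim=1))  # ~1.0 for every row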
|
from torch import Tensor
from torch import nn
from typing import Dict
import torch.nn.functional as F
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self):
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path):
pass
@staticmethod
def load(input_path):
return Normalize()
|
from typing import Dict, Set
from fastapi import WebSocket
from backend.data import execution
from backend.server.model import Methods, WsMessage
class ConnectionManager:
def __init__(self):
self.active_connections: Set[WebSocket] = set()
self.subscriptions: Dict[str, Set[WebSocket]] = {}
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.add(websocket)
def disconnect(self, websocket: WebSocket):
self.active_connections.remove(websocket)
for subscribers in self.subscriptions.values():
subscribers.discard(websocket)
async def subscribe(self, graph_id: str, graph_version: int, websocket: WebSocket):
key = f"{graph_id}_{graph_version}"
if key not in self.subscriptions:
self.subscriptions[key] = set()
self.subscriptions[key].add(websocket)
async def unsubscribe(
self, graph_id: str, graph_version: int, websocket: WebSocket
):
key = f"{graph_id}_{graph_version}"
if key in self.subscriptions:
self.subscriptions[key].discard(websocket)
if not self.subscriptions[key]:
del self.subscriptions[key]
async def send_execution_result(self, result: execution.ExecutionResult):
key = f"{result.graph_id}_{result.graph_version}"
if key in self.subscriptions:
message = WsMessage(
method=Methods.EXECUTION_EVENT,
channel=key,
data=result.model_dump(),
).model_dump_json()
for connection in self.subscriptions[key]:
await connection.send_text(message)
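# A hedged, self-contained sketch of the subscribe flow above, using a stub in
# place of a real FastAPI WebSocket (the stub class is hypothetical and for
# illustration only).
if __name__ == "__main__":
    import asyncio

    class _StubWebSocket:
        async def accept(self) -> None:
            pass

        async def send_text(self, text: str) -> None:
            print("sent:", text)

    async def _demo() -> None:
        manager = ConnectionManager()
        ws = _StubWebSocket()
        await manager.connect(ws)
        # results for graph "g1", version 3 are routed via the "g1_3" channel key
        await manager.subscribe("g1", 3, ws)
        print(list(manager.subscriptions.keys()))

    asyncio.run(_demo())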
|
from typing import Dict, Set
from fastapi import WebSocket
from backend.data import execution
from backend.server.model import Methods, WsMessage
class ConnectionManager:
def __init__(self):
self.active_connections: Set[WebSocket] = set()
self.subscriptions: Dict[str, Set[WebSocket]] = {}
async def connect(self, websocket: WebSocket):
await websocket.accept()
self.active_connections.add(websocket)
def disconnect(self, websocket: WebSocket):
self.active_connections.remove(websocket)
for subscribers in self.subscriptions.values():
subscribers.discard(websocket)
async def subscribe(self, graph_id: str, websocket: WebSocket):
if graph_id not in self.subscriptions:
self.subscriptions[graph_id] = set()
self.subscriptions[graph_id].add(websocket)
async def unsubscribe(self, graph_id: str, websocket: WebSocket):
if graph_id in self.subscriptions:
self.subscriptions[graph_id].discard(websocket)
if not self.subscriptions[graph_id]:
del self.subscriptions[graph_id]
async def send_execution_result(self, result: execution.ExecutionResult):
graph_id = result.graph_id
if graph_id in self.subscriptions:
message = WsMessage(
method=Methods.EXECUTION_EVENT,
channel=graph_id,
data=result.model_dump(),
).model_dump_json()
for connection in self.subscriptions[graph_id]:
await connection.send_text(message)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', channel_order='bgr', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
from mmdet.datasets.pipelines import (LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
import torch
from mmdet.core import visualization as vis
def test_color():
assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.)
assert vis.color_val_matplotlib('green') == (0., 1., 0.)
assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255)
assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255)
    assert vis.color_val_matplotlib(np.zeros(3, dtype=int)) == (0., 0., 0.)
# forbid white color
with pytest.raises(TypeError):
vis.color_val_matplotlib([255, 255, 255])
# forbid float
with pytest.raises(TypeError):
vis.color_val_matplotlib(1.0)
# overflowed
with pytest.raises(AssertionError):
vis.color_val_matplotlib((0, 0, 500))
def test_imshow_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape[:2]
os.remove(tmp_filename)
# test shaped (0,)
image = np.ones((10, 10, 3), np.uint8)
bbox = np.ones((0, 4))
label = np.ones((0, ))
vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test mask
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
segms = np.random.random((2, 10, 10)) > 0.5
segms = np.array(segms, np.int32)
vis.imshow_det_bboxes(
image, bbox, label, segms, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask type error
with pytest.raises(AttributeError):
segms = torch.tensor(segms)
vis.imshow_det_bboxes(image, bbox, label, segms, show=False)
def test_imshow_gt_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
out_image = vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test numpy mask
gt_mask = np.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask
gt_mask = torch.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test unsupported type
annotation['gt_masks'] = []
with pytest.raises(TypeError):
vis.imshow_gt_det_bboxes(image, annotation, result, show=False)
|
# Copyright (c) Open-MMLab. All rights reserved.
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
import torch
from mmdet.core import visualization as vis
def test_color():
assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.)
assert vis.color_val_matplotlib('green') == (0., 1., 0.)
assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255)
assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255)
    assert vis.color_val_matplotlib(np.zeros(3, dtype=int)) == (0., 0., 0.)
# forbid white color
with pytest.raises(TypeError):
vis.color_val_matplotlib([255, 255, 255])
# forbid float
with pytest.raises(TypeError):
vis.color_val_matplotlib(1.0)
# overflowed
with pytest.raises(AssertionError):
vis.color_val_matplotlib((0, 0, 500))
def test_imshow_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape[:2]
os.remove(tmp_filename)
# test shaped (0,)
image = np.ones((10, 10, 3), np.uint8)
bbox = np.ones((0, 4))
label = np.ones((0, ))
vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test mask
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
segms = np.random.random((2, 10, 10)) > 0.5
segms = np.array(segms, np.int32)
vis.imshow_det_bboxes(
image, bbox, label, segms, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask type error
with pytest.raises(AttributeError):
segms = torch.tensor(segms)
vis.imshow_det_bboxes(image, bbox, label, segms, show=False)
def test_imshow_gt_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
out_image = vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test numpy mask
gt_mask = np.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask
gt_mask = torch.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test unsupported type
annotation['gt_masks'] = []
with pytest.raises(TypeError):
vis.imshow_gt_det_bboxes(image, annotation, result, show=False)
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._datapoint import Datapoint
class Video(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
        dtype (torch.dtype, optional): Desired data type of the video. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the video. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the video. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(f"Expected a video with at least 4 dimensions, but got {tensor.ndim}")
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
_VideoType = Union[torch.Tensor, Video]
_VideoTypeJIT = torch.Tensor
_TensorVideoType = Union[torch.Tensor, Video]
_TensorVideoTypeJIT = torch.Tensor
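# A minimal usage sketch for the Video datapoint above (an illustrative addition,
# not part of the original module): inputs must have at least 4 dimensions,
# e.g. (T, C, H, W) for a clip of T frames.
if __name__ == "__main__":
    clip = Video(torch.rand(8, 3, 32, 32))  # 8 RGB frames of size 32x32
    print(type(clip).__name__, tuple(clip.shape))  # Video (8, 3, 32, 32)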
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._datapoint import Datapoint
class Video(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
        dtype (torch.dtype, optional): Desired data type of the video. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the video. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the video. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(f"Expected a video with at least 4 dimensions, but got {tensor.ndim}")
return cls._wrap(tensor)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
_VideoType = Union[torch.Tensor, Video]
_VideoTypeJIT = torch.Tensor
_TensorVideoType = Union[torch.Tensor, Video]
_TensorVideoTypeJIT = torch.Tensor
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import PointCloud3D
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensors = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensors.points, np.ndarray)
def test_point_cloud_np():
pc = parse_obj_as(PointCloud3D, np.zeros((10, 3)))
assert (pc.tensors.points == np.zeros((10, 3))).all()
def test_point_cloud_torch():
pc = parse_obj_as(PointCloud3D, torch.zeros(10, 3))
assert (pc.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_tensorflow():
pc = parse_obj_as(PointCloud3D, tf.zeros((10, 3)))
assert tnp.allclose(pc.tensors.points.tensor, tf.zeros((10, 3)))
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
pc3: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=np.zeros((10, 3)),
pc3=torch.zeros(10, 3),
)
assert doc.pc.url == 'http://myurl.ply'
assert (doc.pc2.tensors.points == np.zeros((10, 3))).all()
assert (doc.pc3.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_shortcut_doc_tf():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=tf.zeros((10, 3)),
)
assert doc.pc.url == 'http://myurl.ply'
assert tnp.allclose(doc.pc2.tensors.points.tensor, tf.zeros((10, 3)))
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import PointCloud3D
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensors = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensors.points, np.ndarray)
def test_point_cloud_np():
pc = parse_obj_as(PointCloud3D, np.zeros((10, 3)))
assert (pc.tensors.points == np.zeros((10, 3))).all()
def test_point_cloud_torch():
pc = parse_obj_as(PointCloud3D, torch.zeros(10, 3))
assert (pc.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_tensorflow():
pc = parse_obj_as(PointCloud3D, tf.zeros((10, 3)))
assert tnp.allclose(pc.tensors.points.tensor, tf.zeros((10, 3)))
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
pc3: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=np.zeros((10, 3)),
pc3=torch.zeros(10, 3),
)
assert doc.pc.url == 'http://myurl.ply'
assert (doc.pc2.tensors.points == np.zeros((10, 3))).all()
assert (doc.pc3.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_shortcut_doc_tf():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=tf.zeros((10, 3)),
)
assert doc.pc.url == 'http://myurl.ply'
assert tnp.allclose(doc.pc2.tensors.points.tensor, tf.zeros((10, 3)))
|
_base_ = '../rpn/rpn_r50-caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='GARPNHead',
in_channels=256,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=8,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[8],
strides=[4, 8, 16, 32, 64]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.14, 0.14]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.07, 0.07, 0.11, 0.11]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5)),
test_cfg=dict(rpn=dict(nms_post=1000)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class CustomDiffusion(ExamplesTestsAccelerate):
def test_custom_diffusion(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir docs/source/en/imgs
--instance_prompt <new1>
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 1.0e-05
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--modifier_token <new1>
--no_safe_serialization
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_custom_diffusion_weights.bin")))
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "<new1>.bin")))
def test_custom_diffusion_checkpointing_checkpoints_total_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=6
--checkpoints_total_limit=2
--checkpointing_steps=2
--no_safe_serialization
""".split()
run_command(self._launch_args + test_args)
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
def test_custom_diffusion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=4
--checkpointing_steps=2
--no_safe_serialization
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
resume_run_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=8
--checkpointing_steps=2
--resume_from_checkpoint=checkpoint-4
--checkpoints_total_limit=2
--no_safe_serialization
""".split()
run_command(self._launch_args + resume_run_args)
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class CustomDiffusion(ExamplesTestsAccelerate):
def test_custom_diffusion(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir docs/source/en/imgs
--instance_prompt <new1>
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 1.0e-05
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--modifier_token <new1>
--no_safe_serialization
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_custom_diffusion_weights.bin")))
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "<new1>.bin")))
def test_custom_diffusion_checkpointing_checkpoints_total_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=6
--checkpoints_total_limit=2
--checkpointing_steps=2
--no_safe_serialization
""".split()
run_command(self._launch_args + test_args)
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"})
def test_custom_diffusion_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=4
--checkpointing_steps=2
--no_safe_serialization
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
resume_run_args = f"""
examples/custom_diffusion/train_custom_diffusion.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--instance_data_dir=docs/source/en/imgs
--output_dir={tmpdir}
--instance_prompt=<new1>
--resolution=64
--train_batch_size=1
--modifier_token=<new1>
--dataloader_num_workers=0
--max_train_steps=8
--checkpointing_steps=2
--resume_from_checkpoint=checkpoint-4
--checkpoints_total_limit=2
--no_safe_serialization
""".split()
run_command(self._launch_args + resume_run_args)
self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:'"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class ReActSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
    If the output signals that an action should be taken,
    it should be in the format below. This will result in an AgentAction
    being returned.
```
Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?
```
    If the output signals that a final answer should be given,
    it should be in the format below. This will result in an AgentFinish
    being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
@property
def _type(self) -> str:
return "react-single-input"
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:'"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class ReActSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
    If the output signals that an action should be taken,
    it should be in the format below. This will result in an AgentAction
    being returned.
```
Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?
```
    If the output signals that a final answer should be given,
    it should be in the format below. This will result in an AgentFinish
    being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
raise OutputParserException(
f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
@property
def _type(self) -> str:
return "react-single-input"
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18_gflv1_r101_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.data_elements import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18_gflv1_r101_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class MSELoss(nn.Module):
    def __init__(self, model: CrossEncoder, activation_fct: nn.Module = nn.Identity(), **kwargs) -> None:
        super().__init__()
        self.model = model
        # Activation applied to the raw logits before the MSE; reported by get_config_dict below.
        self.activation_fct = activation_fct
        self.loss_fct = nn.MSELoss(**kwargs)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
        logits = self.model(**tokens)[0].view(-1)
        logits = self.activation_fct(logits)
        loss = self.loss_fct(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
}
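# Hedged usage sketch (an illustrative addition: the checkpoint name and the
# example pairs/labels are assumptions, and running this downloads the model):
if __name__ == "__main__":
    import torch

    model = CrossEncoder("cross-encoder/stsb-distilroberta-base", num_labels=1)
    loss = MSELoss(model)
    inputs = [
        ["A man is eating pizza.", "It is raining outside."],  # first text column
        ["Someone is eating food.", "The weather is sunny."],  # second text column
    ]
    labels = torch.tensor([0.9, 0.1])
    print(loss(inputs, labels))  # scalar MSE between predicted logits and labels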
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
# TODO: This loss hasn't been tested yet
class MSELoss(nn.Module):
def __init__(self, model: CrossEncoder, **kwargs) -> None:
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss(**kwargs)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"MSELoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
loss = self.loss_fct(logits, labels.float())
return loss
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.25.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
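# Hedged sanity checks for parse_version_info (illustrative additions, not part of
# the released file): plain releases map to int tuples, rc releases keep the rc
# suffix as a trailing string.
if __name__ == "__main__":
    assert parse_version_info('2.25.0') == (2, 25, 0)
    assert parse_version_info('2.25.0rc1') == (2, 25, 0, 'rc1')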
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.24.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
_base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
_base_ = './faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
from unittest.mock import AsyncMock, Mock
import pytest
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import CallbackManager
from llama_index.embeddings.oci_data_science import OCIDataScienceEmbedding
from llama_index.embeddings.oci_data_science.client import AsyncClient, Client
def test_oci_data_science_embedding_class():
names_of_base_classes = [b.__name__ for b in OCIDataScienceEmbedding.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
response_data = {
"data": {
"data": [
{"embedding": [0.1, 0.2, 0.3], "index": 0, "object": "embedding"},
{"embedding": [0.4, 0.5, 0.6], "index": 1, "object": "embedding"},
],
"model": "sentence-transformers/all-MiniLM-L6-v2",
"object": "list",
"usage": {"prompt_tokens": 14, "total_tokens": 14},
},
"headers": {},
"status": "200 OK",
}
@pytest.fixture()
def embeddings():
endpoint = "https://example.com/api"
auth = {"signer": Mock()}
model_name = "odsc-embeddings"
embed_batch_size = 10
timeout = 60
max_retries = 3
additional_kwargs = {"some_param": "value"}
default_headers = {"Custom-Header": "value"}
callback_manager = CallbackManager([])
embeddings_instance = OCIDataScienceEmbedding(
endpoint=endpoint,
model_name=model_name,
auth={"signer": Mock()},
embed_batch_size=embed_batch_size,
timeout=timeout,
max_retries=max_retries,
additional_kwargs=additional_kwargs,
default_headers=default_headers,
callback_manager=callback_manager,
)
# Mock the client
embeddings_instance._client = Mock(spec=Client)
embeddings_instance._async_client = AsyncMock(spec=AsyncClient)
return embeddings_instance
def test_get_query_embedding(embeddings):
embeddings.client.embeddings.return_value = response_data["data"]
query = "This is a test query"
embedding_vector = embeddings.get_query_embedding(query)
embeddings.client.embeddings.assert_called_once_with(
input=query,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vector == [0.1, 0.2, 0.3]
def test_get_text_embedding(embeddings):
embeddings.client.embeddings.return_value = response_data["data"]
text = "This is a test text"
embedding_vector = embeddings.get_text_embedding(text)
embeddings.client.embeddings.assert_called_once_with(
input=text,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vector == [0.1, 0.2, 0.3]
def test_get_text_embedding_batch(embeddings):
embeddings.client.embeddings.return_value = response_data["data"]
texts = ["Text one", "Text two"]
embedding_vectors = embeddings.get_text_embedding_batch(texts)
embeddings.client.embeddings.assert_called_once_with(
input=texts,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vectors == [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
@pytest.mark.asyncio
async def test_aget_query_embedding(embeddings):
embeddings.async_client.embeddings.return_value = response_data["data"]
query = "Async test query"
embedding_vector = await embeddings.aget_query_embedding(query)
embeddings.async_client.embeddings.assert_called_once_with(
input=query,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vector == [0.1, 0.2, 0.3]
@pytest.mark.asyncio
async def test_aget_text_embedding(embeddings):
embeddings.async_client.embeddings.return_value = response_data["data"]
text = "Async test text"
embedding_vector = await embeddings.aget_text_embedding(text)
embeddings.async_client.embeddings.assert_called_once_with(
input=text,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vector == [0.1, 0.2, 0.3]
@pytest.mark.asyncio
async def test_aget_text_embedding_batch(embeddings):
embeddings.async_client.embeddings.return_value = response_data["data"]
texts = ["Async text one", "Async text two"]
embedding_vectors = await embeddings.aget_text_embedding_batch(texts)
embeddings.async_client.embeddings.assert_called_once_with(
input=texts,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vectors == [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
|
from unittest.mock import AsyncMock, Mock
import pytest
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import CallbackManager
from llama_index.embeddings.oci_data_science import OCIDataScienceEmbedding
from llama_index.embeddings.oci_data_science.client import AsyncClient, Client
def test_oci_data_science_embedding_class():
names_of_base_classes = [b.__name__ for b in OCIDataScienceEmbedding.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
response_data = {
"data": {
"data": [
{"embedding": [0.1, 0.2, 0.3], "index": 0, "object": "embedding"},
{"embedding": [0.4, 0.5, 0.6], "index": 1, "object": "embedding"},
],
"model": "sentence-transformers/all-MiniLM-L6-v2",
"object": "list",
"usage": {"prompt_tokens": 14, "total_tokens": 14},
},
"headers": {},
"status": "200 OK",
}
@pytest.fixture()
def embeddings():
endpoint = "https://example.com/api"
auth = {"signer": Mock()}
model_name = "odsc-embeddings"
embed_batch_size = 10
timeout = 60
max_retries = 3
additional_kwargs = {"some_param": "value"}
default_headers = {"Custom-Header": "value"}
callback_manager = CallbackManager([])
embeddings_instance = OCIDataScienceEmbedding(
endpoint=endpoint,
model_name=model_name,
auth={"signer": Mock()},
embed_batch_size=embed_batch_size,
timeout=timeout,
max_retries=max_retries,
additional_kwargs=additional_kwargs,
default_headers=default_headers,
callback_manager=callback_manager,
)
# Mock the client
embeddings_instance._client = Mock(spec=Client)
embeddings_instance._async_client = AsyncMock(spec=AsyncClient)
return embeddings_instance
def test_get_query_embedding(embeddings):
embeddings.client.embeddings.return_value = response_data["data"]
query = "This is a test query"
embedding_vector = embeddings.get_query_embedding(query)
embeddings.client.embeddings.assert_called_once_with(
input=query,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vector == [0.1, 0.2, 0.3]
def test_get_text_embedding(embeddings):
embeddings.client.embeddings.return_value = response_data["data"]
text = "This is a test text"
embedding_vector = embeddings.get_text_embedding(text)
embeddings.client.embeddings.assert_called_once_with(
input=text,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vector == [0.1, 0.2, 0.3]
def test_get_text_embedding_batch(embeddings):
embeddings.client.embeddings.return_value = response_data["data"]
texts = ["Text one", "Text two"]
embedding_vectors = embeddings.get_text_embedding_batch(texts)
embeddings.client.embeddings.assert_called_once_with(
input=texts,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vectors == [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
@pytest.mark.asyncio()
async def test_aget_query_embedding(embeddings):
embeddings.async_client.embeddings.return_value = response_data["data"]
query = "Async test query"
embedding_vector = await embeddings.aget_query_embedding(query)
embeddings.async_client.embeddings.assert_called_once_with(
input=query,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vector == [0.1, 0.2, 0.3]
@pytest.mark.asyncio()
async def test_aget_text_embedding(embeddings):
embeddings.async_client.embeddings.return_value = response_data["data"]
text = "Async test text"
embedding_vector = await embeddings.aget_text_embedding(text)
embeddings.async_client.embeddings.assert_called_once_with(
input=text,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vector == [0.1, 0.2, 0.3]
@pytest.mark.asyncio()
async def test_aget_text_embedding_batch(embeddings):
embeddings.async_client.embeddings.return_value = response_data["data"]
texts = ["Async text one", "Async text two"]
embedding_vectors = await embeddings.aget_text_embedding_batch(texts)
embeddings.async_client.embeddings.assert_called_once_with(
input=texts,
payload=embeddings.additional_kwargs,
headers=embeddings.default_headers,
)
assert embedding_vectors == [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
|
import asyncio
import logging
import sentry_sdk
from pydantic import SecretStr
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={"enable_logs": True},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
def sentry_capture_error(error: Exception):
sentry_sdk.capture_exception(error)
sentry_sdk.flush()
def discord_send_alert(content: str):
from backend.blocks.discord import SendDiscordMessageBlock
from backend.data.model import APIKeyCredentials, CredentialsMetaInput, ProviderName
from backend.util.settings import Settings
settings = Settings()
creds = APIKeyCredentials(
provider="discord",
api_key=SecretStr(settings.secrets.discord_bot_token),
title="Provide Discord Bot Token for the platform alert",
expires_at=None,
)
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return SendDiscordMessageBlock().run_once(
SendDiscordMessageBlock.Input(
credentials=CredentialsMetaInput(
id=creds.id,
title=creds.title,
type=creds.type,
provider=ProviderName.DISCORD,
),
message_content=content,
channel_name=settings.config.platform_alert_discord_channel,
),
"status",
credentials=creds,
)
|
import logging
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from backend.util.settings import Settings
def sentry_init():
sentry_dsn = Settings().secrets.sentry_dsn
sentry_sdk.init(
dsn=sentry_dsn,
traces_sample_rate=1.0,
profiles_sample_rate=1.0,
environment=f"app:{Settings().config.app_env.value}-behave:{Settings().config.behave_as.value}",
_experiments={"enable_logs": True},
integrations=[
LoggingIntegration(sentry_logs_level=logging.INFO),
AnthropicIntegration(
include_prompts=False,
),
],
)
def sentry_alert(error: Exception):
sentry_sdk.capture_exception(error)
sentry_sdk.flush()
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.mimetypes import MESH_EXTRA_EXTENSIONS
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return MESH_EXTRA_EXTENSIONS
def load(
self: T,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'VerticesAndFaces':
"""
Load the data from the url into a [`VerticesAndFaces`][docarray.documents.VerticesAndFaces]
object containing vertices and faces information.
---
```python
from docarray import BaseDoc
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDoc):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
```
        :param skip_materials: Skip loading of materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
if not trimesh_args:
trimesh_args = {}
mesh = self._load_trimesh_instance(
force='mesh', skip_materials=skip_materials, **trimesh_args
)
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
"""
from IPython.display import display
mesh = self._load_trimesh_instance(skip_materials=False)
display(mesh.show())
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.mimetypes import MESH_EXTRA_EXTENSIONS
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
T = TypeVar('T', bound='Mesh3DUrl')
@_register_proto(proto_type_name='mesh_url')
class Mesh3DUrl(Url3D):
"""
URL to a file containing 3D mesh information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return MESH_EXTRA_EXTENSIONS
def load(
self: T,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'VerticesAndFaces':
"""
Load the data from the url into a [`VerticesAndFaces`][docarray.documents.VerticesAndFaces]
object containing vertices and faces information.
---
```python
from docarray import BaseDoc
from docarray.typing import Mesh3DUrl, NdArray
class MyDoc(BaseDoc):
mesh_url: Mesh3DUrl
doc = MyDoc(mesh_url="https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj")
tensors = doc.mesh_url.load()
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, NdArray)
```
        :param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: VerticesAndFaces object containing vertices and faces information.
"""
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
if not trimesh_args:
trimesh_args = {}
mesh = self._load_trimesh_instance(
force='mesh', skip_materials=skip_materials, **trimesh_args
)
vertices = parse_obj_as(NdArray, mesh.vertices.view(np.ndarray))
faces = parse_obj_as(NdArray, mesh.faces.view(np.ndarray))
return VerticesAndFaces(vertices=vertices, faces=faces)
def display(self) -> None:
"""
Plot mesh from url.
This loads the Trimesh instance of the 3D mesh, and then displays it.
"""
from IPython.display import display
mesh = self._load_trimesh_instance(skip_materials=False)
display(mesh.show())
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
Model Sparsity Stats: Row Non-Zero Mean: 81.0629997253418, Row Sparsity Mean: 0.997344046831131
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStagePanopticSegmentor(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_file = 'panoptic_fpn/panoptic-fpn_r50_fpn_1x_coco.py'
model_cfg = get_detector_cfg(cfg_file)
model_cfg.backbone.depth = 18
model_cfg.neck.in_channels = [64, 128, 256, 512]
model_cfg.backbone.init_cfg = None
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.roi_head
assert detector.roi_head.mask_head
assert detector.with_semantic_head
assert detector.with_panoptic_fusion_head
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStagePanopticSegmentor(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_file = 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py'
model_cfg = get_detector_cfg(cfg_file)
model_cfg.backbone.depth = 18
model_cfg.neck.in_channels = [64, 128, 256, 512]
model_cfg.backbone.init_cfg = None
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.roi_head
assert detector.roi_head.mask_head
assert detector.with_semantic_head
assert detector.with_panoptic_fusion_head
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, True)
# Test loss mode
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms_marco_dev_small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms_marco_dev_small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.41
MRR@10: 54.14
NDCG@10: 65.06
Model Query Sparsity: Active Dimensions: 42.2, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 126.5, Sparsity Ratio: 0.9959
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms_marco_dev_small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6506
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms_marco_dev_small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms_marco_dev_small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.61
MRR@10: 54.30
NDCG@10: 65.20
Model Query Sparsity: Active Dimensions: 43.9, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 128.4, Sparsity Ratio: 0.9958
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms_marco_dev_small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6520
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import subprocess
import warnings
from packaging.version import parse
def digit_version(version_str: str, length: int = 4):
"""Convert a version string into a tuple of integers.
This method is usually used for comparing two versions. For pre-release
versions: alpha < beta < rc.
Args:
version_str (str): The version string.
length (int): The maximum number of version levels. Defaults to 4.
Returns:
tuple[int]: The version info in digits (integers).
"""
assert 'parrots' not in version_str
version = parse(version_str)
assert version.release, f'failed to parse version {version_str}'
release = list(version.release)
release = release[:length]
if len(release) < length:
release = release + [0] * (length - len(release))
if version.is_prerelease:
mapping = {'a': -3, 'b': -2, 'rc': -1}
val = -4
# version.pre can be None
if version.pre:
if version.pre[0] not in mapping:
warnings.warn(f'unknown prerelease version {version.pre[0]}, '
'version checking may go wrong')
else:
val = mapping[version.pre[0]]
release.extend([val, version.pre[-1]])
else:
release.extend([val, 0])
elif version.is_postrelease:
release.extend([1, version.post]) # type: ignore
else:
release.extend([0, 0])
return tuple(release)
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out, err = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env).communicate()
return out
def get_git_hash(fallback='unknown', digits=None):
"""Get the git hash of the current repo.
Args:
fallback (str, optional): The fallback string when git hash is
unavailable. Defaults to 'unknown'.
digits (int, optional): kept digits of the hash. Defaults to None,
meaning all digits are kept.
Returns:
str: Git commit hash.
"""
if digits is not None and not isinstance(digits, int):
raise TypeError('digits must be None or an integer')
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
if digits is not None:
sha = sha[:digits]
except OSError:
sha = fallback
return sha
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import subprocess
import warnings
from packaging.version import parse
def digit_version(version_str: str, length: int = 4):
"""Convert a version string into a tuple of integers.
This method is usually used for comparing two versions. For pre-release
versions: alpha < beta < rc.
Args:
version_str (str): The version string.
length (int): The maximum number of version levels. Default: 4.
Returns:
tuple[int]: The version info in digits (integers).
"""
assert 'parrots' not in version_str
version = parse(version_str)
assert version.release, f'failed to parse version {version_str}'
release = list(version.release)
release = release[:length]
if len(release) < length:
release = release + [0] * (length - len(release))
if version.is_prerelease:
mapping = {'a': -3, 'b': -2, 'rc': -1}
val = -4
# version.pre can be None
if version.pre:
if version.pre[0] not in mapping:
warnings.warn(f'unknown prerelease version {version.pre[0]}, '
'version checking may go wrong')
else:
val = mapping[version.pre[0]]
release.extend([val, version.pre[-1]])
else:
release.extend([val, 0])
elif version.is_postrelease:
release.extend([1, version.post]) # type: ignore
else:
release.extend([0, 0])
return tuple(release)
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out, err = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env).communicate()
return out
def get_git_hash(fallback='unknown', digits=None):
"""Get the git hash of the current repo.
Args:
fallback (str, optional): The fallback string when git hash is
unavailable. Defaults to 'unknown'.
digits (int, optional): kept digits of the hash. Defaults to None,
meaning all digits are kept.
Returns:
str: Git commit hash.
"""
if digits is not None and not isinstance(digits, int):
raise TypeError('digits must be None or an integer')
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
if digits is not None:
sha = sha[:digits]
except OSError:
sha = fallback
return sha
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTorchTensor] = None
url: Optional[AudioUrl] = None
bytes_: Optional[AudioBytes] = None
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.zeros(1000, 2),
)
# doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
# doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[AudioBytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.zeros(1000, 2),
)
# doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
# doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
import copy
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
            extract the compressed file in a folder alongside the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
extract_on_the_fly (`bool`, defaults to `False`):
If `True`, extract compressed files while they are being read.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
disable_tqdm (`bool`, defaults to `False`):
Whether to disable the individual files download progress bar
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
extract_on_the_fly: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
storage_options: dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
disable_tqdm: bool = False
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
elif getattr(self.storage_options["hf"], "token", None) is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
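    # Note on the behaviour above (illustrative): assigning `token` after the
    # config has been constructed mirrors it into `storage_options["hf"]`, e.g.
    #   cfg = DownloadConfig(cache_dir="/tmp/hf_cache")
    #   cfg.token = "hf_..."  # also populates cfg.storage_options["hf"]["token"]
    # During `__init__` the mirroring is skipped, since `storage_options` has not
    # been assigned yet at the point where `token` is set.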
|
import copy
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
            extract the compressed file in a folder alongside the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
extract_on_the_fly (`bool`, defaults to `False`):
If `True`, extract compressed files while they are being read.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
disable_tqdm (`bool`, defaults to `False`):
Whether to disable the individual files download progress bar
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
extract_on_the_fly: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
disable_tqdm: bool = False
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
elif getattr(self.storage_options["hf"], "token", None) is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
|
from jina import Client
from docarray import DocList
from docarray.documents import TextDoc
if __name__ == '__main__':
c = Client(host='grpc://0.0.0.0:54321')
da = c.post(
'/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc]
)
print(da.text)
|
from jina import Client
from docarray import DocList
from docarray.documents import TextDoc
if __name__ == '__main__':
c = Client(host='grpc://0.0.0.0:54321')
da = c.post('/', DocList[TextDoc]([TextDoc(), TextDoc()]), return_type=DocList[TextDoc])
print(da.text)
|
# mypy: allow-untyped-defs
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING
import torch
from torch.backends import (
__allow_nonbracketed_mutation,
_FP32Precision,
_get_fp32_precision_getter,
_set_fp32_precision_setter,
ContextProp,
PropModule,
)
def is_available():
r"""Return whether PyTorch is built with MKL-DNN support."""
return torch._C._has_mkldnn
VERBOSE_OFF = 0
VERBOSE_ON = 1
VERBOSE_ON_CREATION = 2
class verbose:
"""
On-demand oneDNN (former MKL-DNN) verbosing functionality.
To make it easier to debug performance issues, oneDNN can dump verbose
messages containing information like kernel size, input data size and
execution duration while executing the kernel. The verbosing functionality
    can be invoked via an environment variable named `DNNL_VERBOSE`. However,
    that approach dumps messages at every step, producing a large volume of
    output, and a single iteration of verbose messages is usually enough to
    investigate a performance issue. This on-demand verbosing functionality
    makes it possible to control the scope of verbose message dumping. In the
    following example, verbose messages will be dumped for the second
    inference only.
.. highlight:: python
.. code-block:: python
import torch
model(data)
with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
model(data)
Args:
level: Verbose level
- ``VERBOSE_OFF``: Disable verbosing
- ``VERBOSE_ON``: Enable verbosing
- ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
"""
def __init__(self, level):
self.level = level
def __enter__(self):
if self.level == VERBOSE_OFF:
return
st = torch._C._verbose.mkldnn_set_verbose(self.level)
assert st, (
"Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope."
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
return False
def set_flags(
_enabled=None, _deterministic=None, _allow_tf32=None, _fp32_precision="none"
):
orig_flags = (
torch._C._get_mkldnn_enabled(),
torch._C._get_mkldnn_deterministic(),
torch._C._get_onednn_allow_tf32(),
torch._C._get_fp32_precision_getter("mkldnn", "all"),
)
if _enabled is not None:
torch._C._set_mkldnn_enabled(_enabled)
if _deterministic is not None:
torch._C._set_mkldnn_deterministic(_deterministic)
if _allow_tf32 is not None:
torch._C._set_onednn_allow_tf32(_allow_tf32)
if _fp32_precision is not None:
torch._C._set_fp32_precision_setter("mkldnn", "all", _fp32_precision)
return orig_flags
@contextmanager
def flags(enabled=False, deterministic=False, allow_tf32=True, fp32_precision="none"):
with __allow_nonbracketed_mutation():
orig_flags = set_flags(enabled, deterministic, allow_tf32, fp32_precision)
try:
yield
finally:
with __allow_nonbracketed_mutation():
set_flags(*orig_flags)
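# Illustrative usage of the `flags` context manager above (a sketch; the module
# is normally accessed as `torch.backends.mkldnn`):
#   with torch.backends.mkldnn.flags(enabled=False):
#       y = model(x)  # run this scope without oneDNN-accelerated kernels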
class MkldnnModule(PropModule):
def __init__(self, m, name):
super().__init__(m, name)
def is_available(self):
return is_available()
enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
deterministic = ContextProp(
torch._C._get_mkldnn_deterministic, torch._C._set_mkldnn_deterministic
)
allow_tf32 = ContextProp(
torch._C._get_onednn_allow_tf32, torch._C._set_onednn_allow_tf32
)
matmul = _FP32Precision("mkldnn", "matmul")
conv = _FP32Precision("mkldnn", "conv")
rnn = _FP32Precision("mkldnn", "rnn")
fp32_precision = ContextProp(
_get_fp32_precision_getter("mkldnn", "all"),
_set_fp32_precision_setter("generic", "all"),
)
if TYPE_CHECKING:
enabled: ContextProp
deterministic: ContextProp
allow_tf32: ContextProp
sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
|
# mypy: allow-untyped-defs
import sys
from contextlib import contextmanager
from typing import TYPE_CHECKING
import torch
from torch.backends import (
__allow_nonbracketed_mutation,
_FP32Precision,
_get_fp32_precision_getter,
_set_fp32_precision_setter,
ContextProp,
PropModule,
)
def is_available():
r"""Return whether PyTorch is built with MKL-DNN support."""
return torch._C._has_mkldnn
VERBOSE_OFF = 0
VERBOSE_ON = 1
VERBOSE_ON_CREATION = 2
class verbose:
"""
On-demand oneDNN (former MKL-DNN) verbosing functionality.
To make it easier to debug performance issues, oneDNN can dump verbose
messages containing information like kernel size, input data size and
execution duration while executing the kernel. The verbosing functionality
    can be invoked via an environment variable named `DNNL_VERBOSE`. However,
    that approach dumps messages at every step, producing a large volume of
    output, and a single iteration of verbose messages is usually enough to
    investigate a performance issue. This on-demand verbosing functionality
    makes it possible to control the scope of verbose message dumping. In the
    following example, verbose messages will be dumped for the second
    inference only.
.. highlight:: python
.. code-block:: python
import torch
model(data)
with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
model(data)
Args:
level: Verbose level
- ``VERBOSE_OFF``: Disable verbosing
- ``VERBOSE_ON``: Enable verbosing
- ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
"""
def __init__(self, level):
self.level = level
def __enter__(self):
if self.level == VERBOSE_OFF:
return
st = torch._C._verbose.mkldnn_set_verbose(self.level)
assert (
st
), "Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope."
return self
def __exit__(self, exc_type, exc_val, exc_tb):
torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
return False
def set_flags(
_enabled=None, _deterministic=None, _allow_tf32=None, _fp32_precision="none"
):
orig_flags = (
torch._C._get_mkldnn_enabled(),
torch._C._get_mkldnn_deterministic(),
torch._C._get_onednn_allow_tf32(),
torch._C._get_fp32_precision_getter("mkldnn", "all"),
)
if _enabled is not None:
torch._C._set_mkldnn_enabled(_enabled)
if _deterministic is not None:
torch._C._set_mkldnn_deterministic(_deterministic)
if _allow_tf32 is not None:
torch._C._set_onednn_allow_tf32(_allow_tf32)
if _fp32_precision is not None:
torch._C._set_fp32_precision_setter("mkldnn", "all", _fp32_precision)
return orig_flags
@contextmanager
def flags(enabled=False, deterministic=False, allow_tf32=True, fp32_precision="none"):
with __allow_nonbracketed_mutation():
orig_flags = set_flags(enabled, deterministic, allow_tf32, fp32_precision)
try:
yield
finally:
with __allow_nonbracketed_mutation():
set_flags(*orig_flags)
class MkldnnModule(PropModule):
def __init__(self, m, name):
super().__init__(m, name)
def is_available(self):
return is_available()
enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
deterministic = ContextProp(
torch._C._get_mkldnn_deterministic, torch._C._set_mkldnn_deterministic
)
allow_tf32 = ContextProp(
torch._C._get_onednn_allow_tf32, torch._C._set_onednn_allow_tf32
)
matmul = _FP32Precision("mkldnn", "matmul")
conv = _FP32Precision("mkldnn", "conv")
rnn = _FP32Precision("mkldnn", "rnn")
fp32_precision = ContextProp(
_get_fp32_precision_getter("mkldnn", "all"),
_set_fp32_precision_setter("generic", "all"),
)
if TYPE_CHECKING:
enabled: ContextProp
deterministic: ContextProp
allow_tf32: ContextProp
sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
|
from typing import Tuple
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.updater import get_basescore
rng = np.random.RandomState(1994)
class TestEarlyStopping:
@pytest.mark.skipif(**tm.no_sklearn())
def test_early_stopping_nonparallel(self):
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
X, y = load_digits(n_class=2, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf1 = xgb.XGBClassifier(
learning_rate=0.1, early_stopping_rounds=5, eval_metric="auc"
)
clf1.fit(X_train, y_train, eval_set=[(X_test, y_test)])
clf2 = xgb.XGBClassifier(
learning_rate=0.1, early_stopping_rounds=4, eval_metric="auc"
)
clf2.fit(X_train, y_train, eval_set=[(X_test, y_test)])
# should be the same
assert clf1.best_score == clf2.best_score
assert clf1.best_score != 1
# check overfit
clf3 = xgb.XGBClassifier(
learning_rate=0.1, eval_metric="auc", early_stopping_rounds=10
)
clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)])
base_score = get_basescore(clf3)
assert 0.53 > base_score > 0.5
clf3 = xgb.XGBClassifier(
learning_rate=0.1,
base_score=0.5,
eval_metric="auc",
early_stopping_rounds=10,
)
clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)])
assert clf3.best_score == 1
@staticmethod
def assert_metrics_length(cv, expected_length):
for key, value in cv.items():
assert len(value) == expected_length
@pytest.mark.skipif(**tm.no_sklearn())
def test_cv_early_stopping(self) -> None:
from sklearn.datasets import load_digits
X, y = load_digits(n_class=2, return_X_y=True)
dm = xgb.DMatrix(X, label=y)
params = {
"max_depth": 2,
"eta": 1,
"objective": "binary:logistic",
"eval_metric": "error",
}
def evalerror(preds: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:
from sklearn.metrics import mean_squared_error
labels = dtrain.get_label()
return "rmse", mean_squared_error(labels, preds)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, early_stopping_rounds=5)
self.assert_metrics_length(cv, 3)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
cv = xgb.cv(
params,
dm,
num_boost_round=10,
nfold=10,
custom_metric=evalerror,
early_stopping_rounds=10,
)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(
params,
dm,
num_boost_round=10,
nfold=10,
custom_metric=evalerror,
early_stopping_rounds=1,
)
self.assert_metrics_length(cv, 5)
cv = xgb.cv(
params,
dm,
num_boost_round=10,
nfold=10,
custom_metric=evalerror,
maximize=True,
early_stopping_rounds=1,
)
self.assert_metrics_length(cv, 1)
@pytest.mark.skipif(**tm.no_sklearn())
@pytest.mark.skipif(**tm.no_pandas())
def test_cv_early_stopping_with_multiple_eval_sets_and_metrics(self):
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
dm = xgb.DMatrix(X, label=y)
params = {"objective": "binary:logistic"}
metrics = [
["auc"],
["error"],
["logloss"],
["logloss", "auc"],
["logloss", "error"],
["error", "logloss"],
]
num_iteration_history = []
        # If more than one metric is given, early stopping should use the last one
for i, m in enumerate(metrics):
result = xgb.cv(
params,
dm,
num_boost_round=1000,
nfold=5,
stratified=True,
metrics=m,
early_stopping_rounds=20,
seed=42,
)
num_iteration_history.append(len(result))
df = result["test-{}-mean".format(m[-1])]
            # When early stopping is invoked, the last metric should be at its best value.
if m[-1] == "auc":
assert np.all(df <= df.iloc[-1])
else:
assert np.all(df >= df.iloc[-1])
assert num_iteration_history[:3] == num_iteration_history[3:]
|
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.updater import get_basescore
rng = np.random.RandomState(1994)
class TestEarlyStopping:
@pytest.mark.skipif(**tm.no_sklearn())
def test_early_stopping_nonparallel(self):
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
digits = load_digits(n_class=2)
X = digits["data"]
y = digits["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf1 = xgb.XGBClassifier(
learning_rate=0.1, early_stopping_rounds=5, eval_metric="auc"
)
clf1.fit(X_train, y_train, eval_set=[(X_test, y_test)])
clf2 = xgb.XGBClassifier(
learning_rate=0.1, early_stopping_rounds=4, eval_metric="auc"
)
clf2.fit(X_train, y_train, eval_set=[(X_test, y_test)])
# should be the same
assert clf1.best_score == clf2.best_score
assert clf1.best_score != 1
# check overfit
clf3 = xgb.XGBClassifier(
learning_rate=0.1, eval_metric="auc", early_stopping_rounds=10
)
clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)])
base_score = get_basescore(clf3)
assert 0.53 > base_score > 0.5
clf3 = xgb.XGBClassifier(
learning_rate=0.1,
base_score=0.5,
eval_metric="auc",
early_stopping_rounds=10,
)
clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)])
assert clf3.best_score == 1
def evalerror(self, preds, dtrain):
from sklearn.metrics import mean_squared_error
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
return 'rmse', mean_squared_error(labels, preds)
@staticmethod
def assert_metrics_length(cv, expected_length):
for key, value in cv.items():
assert len(value) == expected_length
@pytest.mark.skipif(**tm.no_sklearn())
def test_cv_early_stopping(self):
from sklearn.datasets import load_digits
digits = load_digits(n_class=2)
X = digits['data']
y = digits['target']
dm = xgb.DMatrix(X, label=y)
params = {
'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic',
'eval_metric': 'error'
}
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=5)
self.assert_metrics_length(cv, 3)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, early_stopping_rounds=10)
self.assert_metrics_length(cv, 10)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, early_stopping_rounds=1)
self.assert_metrics_length(cv, 5)
cv = xgb.cv(params, dm, num_boost_round=10, nfold=10,
feval=self.evalerror, maximize=True,
early_stopping_rounds=1)
self.assert_metrics_length(cv, 1)
@pytest.mark.skipif(**tm.no_sklearn())
@pytest.mark.skipif(**tm.no_pandas())
def test_cv_early_stopping_with_multiple_eval_sets_and_metrics(self):
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
dm = xgb.DMatrix(X, label=y)
params = {'objective':'binary:logistic'}
metrics = [['auc'], ['error'], ['logloss'],
['logloss', 'auc'], ['logloss', 'error'], ['error', 'logloss']]
num_iteration_history = []
        # If more than one metric is given, early stopping should use the last one
for i, m in enumerate(metrics):
result = xgb.cv(params, dm, num_boost_round=1000, nfold=5, stratified=True,
metrics=m, early_stopping_rounds=20, seed=42)
num_iteration_history.append(len(result))
df = result['test-{}-mean'.format(m[-1])]
            # When early stopping is invoked, the last metric should be at its best value.
if m[-1] == 'auc':
assert np.all(df <= df.iloc[-1])
else:
assert np.all(df >= df.iloc[-1])
assert num_iteration_history[:3] == num_iteration_history[3:]
|
from keras.src import tree
from keras.src.backend import KerasTensor
class SymbolicArguments:
def __init__(self, *args, **kwargs):
self.args = tree.map_structure(lambda x: x, args)
self.kwargs = tree.map_structure(lambda x: x, kwargs)
self._flat_arguments = tree.flatten((self.args, self.kwargs))
# Used to avoid expensive `tree` operations in the most common case.
if (
not self.kwargs
and len(self.args) == 1
and isinstance(self.args[0], KerasTensor)
):
self._single_positional_tensor = self.args[0]
else:
self._single_positional_tensor = None
self.keras_tensors = []
for arg in self._flat_arguments:
if isinstance(arg, KerasTensor):
self.keras_tensors.append(arg)
def convert(self, conversion_fn):
args = tree.map_structure(conversion_fn, self.args)
kwargs = tree.map_structure(conversion_fn, self.kwargs)
return args, kwargs
def fill_in(self, tensor_dict):
"""Maps KerasTensors to computed values using `tensor_dict`.
`tensor_dict` maps `KerasTensor` instances to their current values.
"""
if self._single_positional_tensor is not None:
# Performance optimization for most common case.
# Approx. 70x faster.
return (tensor_dict[id(self._single_positional_tensor)],), {}
def switch_fn(x):
if isinstance(x, KerasTensor):
return tensor_dict.get(id(x), None)
return x
return self.convert(switch_fn)
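# Illustrative usage (a sketch; `kt` stands for a KerasTensor placeholder and
# `value` for its computed backend tensor, both hypothetical here):
#   sym_args = SymbolicArguments(kt, scale=2.0)
#   args, kwargs = sym_args.fill_in({id(kt): value})
# `fill_in` looks tensors up by `id(...)`, which is why the dict is keyed that way.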
|
from keras.src import tree
from keras.src.backend import KerasTensor
class SymbolicArguments:
def __init__(self, *args, **kwargs):
self.args = tree.map_structure(lambda x: x, args)
self.kwargs = tree.map_structure(lambda x: x, kwargs)
self._flat_arguments = tree.flatten((self.args, self.kwargs))
# Used to avoid expensive `tree` operations in the most common case.
if (
not self.kwargs
and len(self.args) == 1
and isinstance(self.args[0], KerasTensor)
):
self._single_positional_tensor = self.args[0]
else:
self._single_positional_tensor = None
self.keras_tensors = []
for arg in self._flat_arguments:
if isinstance(arg, KerasTensor):
self.keras_tensors.append(arg)
def convert(self, conversion_fn):
args = tree.map_structure(conversion_fn, self.args)
kwargs = tree.map_structure(conversion_fn, self.kwargs)
return args, kwargs
def fill_in(self, tensor_dict):
"""Maps KerasTensors to computed values using `tensor_dict`.
`tensor_dict` maps `KerasTensor` instances to their current values.
"""
if self._single_positional_tensor is not None:
# Performance optimization for most common case.
# Approx. 70x faster.
return (tensor_dict[id(self._single_positional_tensor)],), {}
def switch_fn(x):
if isinstance(x, KerasTensor):
val = tensor_dict.get(id(x), None)
if val is not None:
return val
return x
return self.convert(switch_fn)
|
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.array.array_stacked import DocumentArrayStacked
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDocument):
text: str
tensor: NdArray
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDocument):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDocument):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDocument):
image: NdArray
da = DocumentArray[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocumentArrayStacked.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocumentArrayStacked)
|
import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.array.array_stacked import DocumentArrayStacked
from docarray.documents import Image, Text
from docarray.typing import NdArray
def test_simple_proto():
class CustomDoc(BaseDocument):
text: str
tensor: NdArray
da = DocumentArray(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocumentArray[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
def test_nested_proto():
class CustomDocument(BaseDocument):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray[CustomDocument].from_protobuf(da.to_protobuf())
def test_nested_proto_any_doc():
class CustomDocument(BaseDocument):
text: Text
image: Image
da = DocumentArray[CustomDocument](
[
CustomDocument(
text=Text(text='hello'), image=Image(tensor=np.zeros((3, 224, 224)))
)
for _ in range(10)
]
)
DocumentArray.from_protobuf(da.to_protobuf())
def test_stacked_proto():
class CustomDocument(BaseDocument):
image: NdArray
da = DocumentArray[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocumentArrayStacked.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocumentArrayStacked)
|
"""
A quantized model executes some or all of its operations with integers rather than floating point values. This allows for more compact models and the use of high-performance vectorized operations on many hardware platforms.
As a result, you get models that are roughly 40% smaller and faster. The speed-up depends on your CPU and on how PyTorch was built, and can be anywhere between 10% and 300%.
Note: Quantized models only run on CPUs. If a GPU is available, use the regular (unquantized) model on the GPU for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import csv
import gzip
import logging
import os
import time
import torch
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
### /print debug information to stdout
model_name = "all-distilroberta-v1"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")
q_model = quantize_dynamic(model, {Linear, Embedding})
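# `quantize_dynamic` (as used above) swaps the listed module types for quantized
# counterparts whose weights are stored in int8; for Linear layers, activations
# are additionally quantized on the fly at inference time. The returned
# `q_model` is a copy, the original `model` is left untouched, and the speed-up
# measured below applies to CPU inference only.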
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
sentences.append(row["sentence1"])
sentences.append(row["sentence2"])
if row["split"] == "test":
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_normal, len(sentences) / diff_normal))
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_quantized, len(sentences) / diff_quantized))
logging.info("Speed-up: {:.2f}".format(diff_normal / diff_quantized))
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
|
"""
A quantized model executes some or all of its operations with integers rather than floating point values. This allows for more compact models and the use of high-performance vectorized operations on many hardware platforms.
As a result, you get models that are roughly 40% smaller and faster. The speed-up depends on your CPU and on how PyTorch was built, and can be anywhere between 10% and 300%.
Note: Quantized models only run on CPUs. If a GPU is available, use the regular (unquantized) model on the GPU for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import logging
import os
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
import gzip
import csv
import time
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
### /print debug information to stdout
model_name = "all-distilroberta-v1"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")
q_model = quantize_dynamic(model, {Linear, Embedding})
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
sentences.append(row["sentence1"])
sentences.append(row["sentence2"])
if row["split"] == "test":
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_normal, len(sentences) / diff_normal))
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_quantized, len(sentences) / diff_quantized))
logging.info("Speed-up: {:.2f}".format(diff_normal / diff_quantized))
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
|
import os
from jina import Executor, requests
class FilewriterExec(Executor):
@requests
def foo(self, **kwargs):
print(self.workspace)
file = os.path.join(self.workspace, 'out.txt')
with open(file, 'w', encoding='utf-8') as f:
f.write('Filewriter was here')
|
import os
from jina import Executor, requests
class FilewriterExec(Executor):
@requests
def foo(self, **kwargs):
print(self.workspace)
file = os.path.join(self.workspace, 'out.txt')
with open(file, 'w') as f:
f.write('Filewriter was here')
|
import pytest
from unittest.mock import Mock, patch, AsyncMock
from llama_index.embeddings.nvidia import NVIDIAEmbedding
class MockEmbeddingResponse:
"""Mock response matching the structure expected by the code."""
def __init__(self):
self.data = [
Mock(embedding=[1.0, 2.0, 3.0], index=0)
]
@pytest.fixture(autouse=True)
def mock_openai():
"""Set up mock for OpenAI client."""
# Create mock response
mock_response = MockEmbeddingResponse()
# Patch at the module level where the code imports from
# NVIDIAEmbedding uses: from openai import OpenAI, AsyncOpenAI
with patch("llama_index.embeddings.nvidia.base.OpenAI") as mock_openai_cls, \
patch("llama_index.embeddings.nvidia.base.AsyncOpenAI") as mock_async_openai_cls:
# Set up the sync client mock
mock_client = Mock()
mock_embeddings = Mock()
mock_embeddings.create.return_value = mock_response
mock_client.embeddings = mock_embeddings
mock_openai_cls.return_value = mock_client
# Set up the async client mock properly
mock_aclient = Mock()
mock_aembeddings = Mock()
# Use AsyncMock for the create method to make it awaitable
mock_aembeddings.create = AsyncMock(return_value=mock_response)
mock_aclient.embeddings = mock_aembeddings
mock_async_openai_cls.return_value = mock_aclient
yield mock_client, mock_aclient
@pytest.mark.parametrize("method_name", ["get_query_embedding", "get_text_embedding"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
def test_single_truncate(method_name: str, truncate: str):
# Call the method
getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)("nvidia")
@pytest.mark.parametrize("method_name", ["aget_query_embedding", "aget_text_embedding"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
@pytest.mark.asyncio
async def test_asingle_truncate(method_name: str, truncate: str):
# Call the method
await getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)("nvidia")
@pytest.mark.parametrize("method_name", ["get_text_embedding_batch"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
def test_batch_truncate(method_name: str, truncate: str):
# Call the method
getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(["nvidia"])
@pytest.mark.parametrize("method_name", ["aget_text_embedding_batch"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
@pytest.mark.asyncio
async def test_abatch_truncate(method_name: str, truncate: str):
# Call the method
await getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(["nvidia"])
|
import pytest
import respx
from llama_index.embeddings.nvidia import NVIDIAEmbedding
import re
import httpx
import json
@pytest.fixture()
def mocked_route() -> respx.Route:
all_urls = re.compile(r".*/embeddings")
fake_response = httpx.Response(
200, json={"data": [{"index": 0, "embedding": [1.0, 2.0, 3.0]}]}
)
with respx.mock:
yield respx.post(all_urls).mock(return_value=fake_response)
@pytest.mark.parametrize("method_name", ["get_query_embedding", "get_text_embedding"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
def test_single_truncate(mocked_route: respx.Route, method_name: str, truncate: str):
# call the method_name method
getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)("nvidia")
assert mocked_route.called
request = mocked_route.calls.last.request
request_body = json.loads(request.read())
assert "truncate" in request_body
assert request_body["truncate"] == truncate
@pytest.mark.parametrize("method_name", ["aget_query_embedding", "aget_text_embedding"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
@pytest.mark.asyncio
async def test_asingle_truncate(
mocked_route: respx.Route, method_name: str, truncate: str
):
# call the method_name method
await getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(
"nvidia"
)
assert mocked_route.called
request = mocked_route.calls.last.request
request_body = json.loads(request.read())
assert "truncate" in request_body
assert request_body["truncate"] == truncate
@pytest.mark.parametrize("method_name", ["get_text_embedding_batch"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
def test_batch_truncate(mocked_route: respx.Route, method_name: str, truncate: str):
# call the method_name method
getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(
["nvidia"]
)
assert mocked_route.called
request = mocked_route.calls.last.request
request_body = json.loads(request.read())
assert "truncate" in request_body
assert request_body["truncate"] == truncate
@pytest.mark.parametrize("method_name", ["aget_text_embedding_batch"])
@pytest.mark.parametrize("truncate", ["END", "START", "NONE"])
@pytest.mark.asyncio
async def test_abatch_truncate(
mocked_route: respx.Route, method_name: str, truncate: str
):
# call the method_name method
await getattr(NVIDIAEmbedding(api_key="BOGUS", truncate=truncate), method_name)(
["nvidia"]
)
assert mocked_route.called
request = mocked_route.calls.last.request
request_body = json.loads(request.read())
assert "truncate" in request_body
assert request_body["truncate"] == truncate
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Fix the fork error on macOS; this alone seems to have no effect, so the variable must also be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.28.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    this is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Fix the fork error on macOS; this alone seems to have no effect, so the variable must also be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.28.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    this is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from langchain_core.vectorstores import VST, VectorStore, VectorStoreRetriever
__all__ = ["VST", "VectorStore", "VectorStoreRetriever"]
|
from langchain_core.vectorstores import VST, VectorStore, VectorStoreRetriever
__all__ = ["VectorStore", "VectorStoreRetriever", "VST"]
|
from abc import abstractmethod
import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import GraphStore
from .neptune import refresh_schema
logger = logging.getLogger(__name__)
class NeptuneBaseGraphStore(GraphStore):
"""This is an abstract base class that represents the shared features across the NeptuneDatabaseGraphStore
and NeptuneAnalyticsGraphStore classes.
"""
    def __init__(self) -> None:
pass
@property
def client(self) -> Any:
return self._client
def get(self, subj: str) -> List[List[str]]:
"""Get triplets."""
query = """
MATCH (n1:%s)-[r]->(n2:%s)
WHERE n1.id = $subj
RETURN type(r), n2.id;
"""
prepared_statement = query % (self.node_label, self.node_label)
with self._driver.session(database=self._database) as session:
data = session.run(prepared_statement, {"subj": subj})
return [record.values() for record in data]
def get_rel_map(
self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30
) -> Dict[str, List[List[str]]]:
"""Get flat rel map."""
rel_map: Dict[Any, List[Any]] = {}
if subjs is None or len(subjs) == 0:
return rel_map
query = f"""MATCH p=(n1:{self.node_label})-[*1..{depth}]->() WHERE n1.id IN $subjs
UNWIND relationships(p) AS rel WITH n1.id AS subj, p,
collect([type(rel), endNode(rel).id])AS flattened_rels
UNWIND flattened_rels as fr
WITH DISTINCT fr, subj
RETURN subj, collect(fr) AS flattened_rels LIMIT {limit}"""
data = list(self.query(query, {"subjs": subjs}))
if not data:
return rel_map
for record in data:
rel_map[record["subj"]] = record["flattened_rels"]
return rel_map
def upsert_triplet(self, subj: str, rel: str, obj: str) -> None:
"""Add triplet to the graph."""
query = """
MERGE (n1:`%s` {id:$subj})
MERGE (n2:`%s` {id:$obj})
MERGE (n1)-[:`%s`]->(n2)
"""
        prepared_statement = query % (
            self.node_label.replace("`", ""),
            self.node_label.replace("`", ""),
            rel.replace(" ", "_").replace("`", "").upper(),
        )
self.query(
prepared_statement,
{"subj": subj.replace("`", ""), "obj": obj.replace("`", "")},
)
def delete(self, subj: str, rel: str, obj: str) -> None:
"""Delete triplet from the graph."""
def delete_rel(subj: str, obj: str, rel: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
(
"MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $subj AND n2.id"
" = $obj DELETE r"
).format(self.node_label, rel, self.node_label),
{"subj": subj, "obj": obj},
)
def delete_entity(entity: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
"MATCH (n:%s) WHERE n.id = $entity DETACH DELETE n"
% self.node_label,
{"entity": entity},
)
delete_rel(subj, obj, rel)
delete_entity(subj)
def get_schema(self, refresh: bool = False) -> str:
"""Get the schema of the Neptune KG store."""
if refresh or not self.schema:
self.schema = refresh_schema(self.query, self._get_summary())["schema_str"]
return self.schema
@abstractmethod
def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
raise NotImplementedError
@abstractmethod
def _get_summary(self) -> Dict:
raise NotImplementedError
|
from abc import abstractmethod
import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import GraphStore
from .neptune import refresh_schema
logger = logging.getLogger(__name__)
class NeptuneBaseGraphStore(GraphStore):
"""This is an abstract base class that represents the shared features across the NeptuneDatabaseGraphStore
and NeptuneAnalyticsGraphStore classes.
"""
    def __init__(self) -> None:
pass
@property
def client(self) -> Any:
return self._client
def get(self, subj: str) -> List[List[str]]:
"""Get triplets."""
query = """
MATCH (n1:%s)-[r]->(n2:%s)
WHERE n1.id = $subj
RETURN type(r), n2.id;
"""
prepared_statement = query % (self.node_label, self.node_label)
with self._driver.session(database=self._database) as session:
data = session.run(prepared_statement, {"subj": subj})
return [record.values() for record in data]
def get_rel_map(
self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30
) -> Dict[str, List[List[str]]]:
"""Get flat rel map."""
rel_map: Dict[Any, List[Any]] = {}
if subjs is None or len(subjs) == 0:
return rel_map
query = f"""MATCH p=(n1:{self.node_label})-[*1..{depth}]->() WHERE n1.id IN $subjs
UNWIND relationships(p) AS rel WITH n1.id AS subj, p,
collect([type(rel), endNode(rel).id])AS flattened_rels
UNWIND flattened_rels as fr
WITH DISTINCT fr, subj
RETURN subj, collect(fr) AS flattened_rels LIMIT {limit}"""
data = list(self.query(query, {"subjs": subjs}))
if not data:
return rel_map
for record in data:
rel_map[record["subj"]] = record["flattened_rels"]
return rel_map
def upsert_triplet(self, subj: str, rel: str, obj: str) -> None:
"""Add triplet to the graph."""
query = """
MERGE (n1:`%s` {id:$subj})
MERGE (n2:`%s` {id:$obj})
MERGE (n1)-[:`%s`]->(n2)
"""
prepared_statement = query % (
self.node_label,
self.node_label,
rel.replace(" ", "_").upper(),
)
self.query(prepared_statement, {"subj": subj, "obj": obj})
def delete(self, subj: str, rel: str, obj: str) -> None:
"""Delete triplet from the graph."""
def delete_rel(subj: str, obj: str, rel: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
(
"MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $subj AND n2.id"
" = $obj DELETE r"
).format(self.node_label, rel, self.node_label),
{"subj": subj, "obj": obj},
)
def delete_entity(entity: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
"MATCH (n:%s) WHERE n.id = $entity DETACH DELETE n"
% self.node_label,
{"entity": entity},
)
delete_rel(subj, obj, rel)
delete_entity(subj)
def get_schema(self, refresh: bool = False) -> str:
"""Get the schema of the Neptune KG store."""
if refresh or not self.schema:
self.schema = refresh_schema(self.query, self._get_summary())["schema_str"]
return self.schema
@abstractmethod
def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
raise NotImplementedError
@abstractmethod
def _get_summary(self) -> Dict:
raise NotImplementedError
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
assert isinstance(tensor, AudioNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert str(proto).startswith('audio_url')
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
assert isinstance(tensor, AudioNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert str(proto).startswith('audio_url')
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# TODO: use repeat dataset wrapper
# training schedule for 3x
train_cfg = dict(by_epoch=True, max_epochs=36)
val_cfg = dict(interval=3)
test_cfg = dict()
# learning rate
# Experiments show that using milestones=[27, 33] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# Use RepeatDataset to speed up training
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# Experiments show that using step=[9, 11] has higher performance
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[9, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from . import compression
COMPRESSION_FILESYSTEMS: list[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or `s3fs.S3FileSystem`.
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
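# Hedged usage sketch (assumption): exercising `rename` on the local filesystem;
# the temporary paths below are illustrative only.
if __name__ == "__main__":
    import os
    import tempfile

    local_fs = fsspec.filesystem("file")
    tmp_dir = tempfile.mkdtemp()
    src = os.path.join(tmp_dir, "old.txt")
    with open(src, "w") as f:
        f.write("content")
    rename(local_fs, src, os.path.join(tmp_dir, "new.txt"))
    print(os.listdir(tmp_dir))  # -> ['new.txt']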
|
import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from . import compression
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or `s3fs.S3FileSystem`.
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.server.external.middleware import require_permission
from backend.server.routers import v1 as internal_api_routes
from backend.util.settings import Settings
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
async def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = await internal_api_routes.execute_graph(
graph_id=graph_id,
node_input=node_input,
user_id=api_key.user_id,
graph_version=graph_version,
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.executor import ExecutionManager
from backend.server.external.middleware import require_permission
from backend.util.service import get_service_client
from backend.util.settings import Settings
@thread_cached
def execution_manager_client() -> ExecutionManager:
return get_service_client(ExecutionManager)
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = execution_manager_client().add_execution(
graph_id,
graph_version=graph_version,
data=node_input,
user_id=api_key.user_id,
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
import pathlib
from argparse import ArgumentParser
from lightning import ConformerRNNTModule, get_data_module
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
)
model = ConformerRNNTModule(str(args.sp_model_path))
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
|
import pathlib
from argparse import ArgumentParser
from lightning import ConformerRNNTModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
def run_train(args):
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
)
model = ConformerRNNTModule(
librispeech_path=str(args.librispeech_path),
sp_model_path=str(args.sp_model_path),
global_stats_path=str(args.global_stats_path),
)
trainer.fit(model)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
|
"""Function components."""
from inspect import signature
from typing import Any, Callable, Dict, Optional, Set, Tuple
from typing_extensions import Annotated
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
)
from llama_index.core.bridge.pydantic import (
Field,
PrivateAttr,
ConfigDict,
WithJsonSchema,
)
from llama_index.core.callbacks.base import CallbackManager
AnnotatedCallable = Annotated[
Callable,
WithJsonSchema({"type": "string"}, mode="serialization"),
WithJsonSchema({"type": "string"}, mode="validation"),
]
def get_parameters(fn: Callable) -> Tuple[Set[str], Set[str]]:
"""
Get parameters from function.
Returns:
Tuple[Set[str], Set[str]]: required and optional parameters
"""
# please write function below
params = signature(fn).parameters
required_params = set()
optional_params = set()
for param_name in params:
param_default = params[param_name].default
if param_default is params[param_name].empty:
required_params.add(param_name)
else:
optional_params.add(param_name)
return required_params, optional_params
class FnComponent(QueryComponent):
"""Query component that takes in an arbitrary function."""
model_config = ConfigDict(arbitrary_types_allowed=True)
fn: AnnotatedCallable = Field(..., description="Function to run.")
async_fn: Optional[AnnotatedCallable] = Field(
None, description="Async function to run. If not provided, will run `fn`."
)
output_key: str = Field(
default="output", description="Output key for component output."
)
_req_params: Set[str] = PrivateAttr()
_opt_params: Set[str] = PrivateAttr()
def __init__(
self,
fn: Callable,
async_fn: Optional[Callable] = None,
req_params: Optional[Set[str]] = None,
opt_params: Optional[Set[str]] = None,
output_key: str = "output",
**kwargs: Any,
) -> None:
"""Initialize."""
# determine parameters
super().__init__(fn=fn, async_fn=async_fn, output_key=output_key, **kwargs)
default_req_params, default_opt_params = get_parameters(fn)
if req_params is None:
req_params = default_req_params
if opt_params is None:
opt_params = default_opt_params
self._req_params = req_params
self._opt_params = opt_params
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
# TODO: implement
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# check that all required parameters are present
missing_params = self._req_params - set(input.keys())
if missing_params:
raise ValueError(
f"Missing required parameters: {missing_params}. "
f"Input keys: {input.keys()}"
)
# check that no extra parameters are present
extra_params = set(input.keys()) - self._req_params - self._opt_params
if extra_params:
raise ValueError(
f"Extra parameters: {extra_params}. Input keys: {input.keys()}"
)
return input
def _run_component(self, **kwargs: Any) -> Dict:
"""Run component."""
return {self.output_key: self.fn(**kwargs)}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
if self.async_fn is None:
return self._run_component(**kwargs)
else:
return {self.output_key: await self.async_fn(**kwargs)}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys(
required_keys=self._req_params, optional_keys=self._opt_params
)
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({self.output_key})
# alias
FunctionComponent = FnComponent
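# Minimal usage sketch (assumption): wrapping a plain function in FnComponent and
# running it directly; parameter names are inferred from the function signature.
if __name__ == "__main__":
    def add(a: int, b: int = 1) -> int:
        return a + b

    component = FnComponent(fn=add)
    # "a" is required, "b" is optional; the result lands under the default "output" key.
    print(component.run_component(a=2, b=3))  # -> {"output": 5}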
|
"""Function components."""
from inspect import signature
from typing import Any, Callable, Dict, Optional, Set, Tuple
from typing_extensions import Annotated
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
)
from llama_index.core.bridge.pydantic import (
Field,
PrivateAttr,
ConfigDict,
WithJsonSchema,
)
from llama_index.core.callbacks.base import CallbackManager
AnnotatedCallable = Annotated[
Callable,
WithJsonSchema({"type": "string"}, mode="serialization"),
WithJsonSchema({"type": "string"}, mode="validation"),
]
def get_parameters(fn: Callable) -> Tuple[Set[str], Set[str]]:
"""
Get parameters from function.
Returns:
Tuple[Set[str], Set[str]]: required and optional parameters
"""
# please write function below
params = signature(fn).parameters
required_params = set()
optional_params = set()
for param_name in params:
param_default = params[param_name].default
if param_default is params[param_name].empty:
required_params.add(param_name)
else:
optional_params.add(param_name)
return required_params, optional_params
class FnComponent(QueryComponent):
"""Query component that takes in an arbitrary function."""
model_config = ConfigDict(arbitrary_types_allowed=True)
fn: AnnotatedCallable = Field(..., description="Function to run.")
async_fn: Optional[AnnotatedCallable] = Field(
None, description="Async function to run. If not provided, will run `fn`."
)
output_key: str = Field(
default="output", description="Output key for component output."
)
_req_params: Set[str] = PrivateAttr()
_opt_params: Set[str] = PrivateAttr()
def __init__(
self,
fn: Callable,
async_fn: Optional[Callable] = None,
req_params: Optional[Set[str]] = None,
opt_params: Optional[Set[str]] = None,
output_key: str = "output",
**kwargs: Any,
) -> None:
"""Initialize."""
# determine parameters
super().__init__(fn=fn, async_fn=async_fn, output_key=output_key, **kwargs)
default_req_params, default_opt_params = get_parameters(fn)
if req_params is None:
req_params = default_req_params
if opt_params is None:
opt_params = default_opt_params
self._req_params = req_params
self._opt_params = opt_params
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
# TODO: implement
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# check that all required parameters are present
missing_params = self._req_params - set(input.keys())
if missing_params:
raise ValueError(
f"Missing required parameters: {missing_params}. "
f"Input keys: {input.keys()}"
)
# check that no extra parameters are present
extra_params = set(input.keys()) - self._req_params - self._opt_params
if extra_params:
raise ValueError(
f"Extra parameters: {extra_params}. " f"Input keys: {input.keys()}"
)
return input
def _run_component(self, **kwargs: Any) -> Dict:
"""Run component."""
return {self.output_key: self.fn(**kwargs)}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
if self.async_fn is None:
return self._run_component(**kwargs)
else:
return {self.output_key: await self.async_fn(**kwargs)}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys(
required_keys=self._req_params, optional_keys=self._opt_params
)
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({self.output_key})
# alias
FunctionComponent = FnComponent
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# Must be the same across all workers. If None, will use a
# random seed shared among workers
        # (requires synchronization among all workers)
self.seed = sync_random_seed(seed)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # for compatibility with PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
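# Illustrative sketch (assumption): constructing the sampler for a toy dataset with an
# explicit rank/num_replicas, so no distributed process group is required.
if __name__ == '__main__':
    dataset = list(range(10))
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True, seed=0)
    sampler.set_epoch(0)  # keeps the shuffle deterministic per epoch
    print(list(iter(sampler)))  # 5 of the 10 indices, interleaved with rank 1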
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity Stats: Row Non-Zero Mean: 63.13884735107422, Row Sparsity Mean: 0.9979313611984253
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
# coding: utf-8
"""Helper script for checking versions in the dynamic symbol table.
This script checks that the LightGBM library is linked to the appropriate symbol versions.
Linking to newer symbol versions at compile time is problematic because it could result
in built artifacts being unusable on older platforms.
Version history for these symbols can be found at the following:
* GLIBC: https://sourceware.org/glibc/wiki/Glibc%20Timeline
* GLIBCXX: https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html
* OMP/GOMP: https://github.com/gcc-mirror/gcc/blob/master/libgomp/libgomp.map
"""
import re
import sys
from pathlib import Path
def check_dependencies(objdump_string: str) -> None:
"""Check the dynamic symbol versions.
Parameters
----------
objdump_string : str
The dynamic symbol table entries of the file (result of `objdump -T` command).
"""
GLIBC_version = re.compile(r'0{16}[ \(\t]+GLIBC_(\d{1,2})[.](\d{1,3})[.]?\d{,3}[ \)\t]+')
versions = GLIBC_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
error_msg = f"found unexpected GLIBC version: '{major}.{minor}'"
assert int(major) <= 2, error_msg
assert int(minor) <= 28, error_msg
GLIBCXX_version = re.compile(r'0{16}[ \(\t]+GLIBCXX_(\d{1,2})[.](\d{1,2})[.]?(\d{,3})[ \)\t]+')
versions = GLIBCXX_version.findall(objdump_string)
assert len(versions) > 1
for major, minor, patch in versions:
error_msg = f"found unexpected GLIBCXX version: '{major}.{minor}.{patch}'"
assert int(major) == 3, error_msg
assert int(minor) == 4, error_msg
assert patch == '' or int(patch) <= 22, error_msg
GOMP_version = re.compile(r'0{16}[ \(\t]+G?OMP_(\d{1,2})[.](\d{1,2})[.]?\d{,3}[ \)\t]+')
versions = GOMP_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
error_msg = f"found unexpected OMP/GOMP version: '{major}.{minor}'"
assert int(major) <= 4, error_msg
assert int(minor) <= 5, error_msg
if __name__ == "__main__":
check_dependencies(Path(sys.argv[1]).read_text(encoding='utf-8'))
|
# coding: utf-8
"""Helper script for checking versions in the dynamic symbol table.
This script checks that the LightGBM library is linked to the appropriate symbol versions.
Linking to newer symbol versions at compile time is problematic because it could result
in built artifacts being unusable on older platforms.
Version history for these symbols can be found at the following:
* GLIBC: https://sourceware.org/glibc/wiki/Glibc%20Timeline
* GLIBCXX: https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html
* OMP/GOMP: https://github.com/gcc-mirror/gcc/blob/master/libgomp/libgomp.map
"""
import re
import sys
from pathlib import Path
def check_dependencies(objdump_string: str) -> None:
"""Check the dynamic symbol versions.
Parameters
----------
objdump_string : str
The dynamic symbol table entries of the file (result of `objdump -T` command).
"""
GLIBC_version = re.compile(r'0{16}[ \t]+GLIBC_(\d{1,2})[.](\d{1,3})[.]?\d{,3}[ \t]+')
versions = GLIBC_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
error_msg = f"found unexpected GLIBC version: '{major}.{minor}'"
assert int(major) <= 2, error_msg
assert int(minor) <= 28, error_msg
GLIBCXX_version = re.compile(r'0{16}[ \t]+GLIBCXX_(\d{1,2})[.](\d{1,2})[.]?(\d{,3})[ \t]+')
versions = GLIBCXX_version.findall(objdump_string)
assert len(versions) > 1
for major, minor, patch in versions:
error_msg = f"found unexpected GLIBCXX version: '{major}.{minor}.{patch}'"
assert int(major) == 3, error_msg
assert int(minor) == 4, error_msg
assert patch == '' or int(patch) <= 22, error_msg
GOMP_version = re.compile(r'0{16}[ \t]+G?OMP_(\d{1,2})[.](\d{1,2})[.]?\d{,3}[ \t]+')
versions = GOMP_version.findall(objdump_string)
assert len(versions) > 1
for major, minor in versions:
error_msg = f"found unexpected OMP/GOMP version: '{major}.{minor}'"
assert int(major) <= 4, error_msg
assert int(minor) <= 5, error_msg
if __name__ == "__main__":
check_dependencies(Path(sys.argv[1]).read_text(encoding='utf-8'))
|
from unittest import mock
from click.testing import CliRunner
from llama_dev.cli import cli
def test_cmd_exec_no_package_no_all_flag():
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "--cmd", "echo hello"])
assert result.exit_code != 0
assert "Either specify a package name or use the --all flag" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
def test_cmd_exec_invalid_package(mock_is_llama_index):
mock_is_llama_index.return_value = False
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "invalid-pkg", "--cmd", "echo hello"])
assert result.exit_code != 0
print(result.output)
assert "not a path to a LlamaIndex package" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.find_all_packages")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_all_flag(mock_subprocess, mock_find_all, data_path):
mock_find_all.return_value = [data_path / "fake/pkg1", data_path / "fake/pkg2"]
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "--all", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 2
assert "Command succeeded" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_single_package_success(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 1
assert "Command succeeded" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_failure_without_fail_fast(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=1, stdout="", stderr="Error message"
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello"])
assert result.exit_code == 0 # Command should continue despite failure
assert "Command 'echo hello' failed" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_failure_with_fail_fast(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=1, stdout="", stderr="Error message"
)
runner = CliRunner()
result = runner.invoke(
cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello", "--fail-fast"]
)
assert result.exit_code != 0 # Command should fail at the first error
assert "Command 'echo hello' failed" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_multiple_packages(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "pkg1", "pkg2", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 2
assert "Command succeeded" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_silent(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(
cli, ["pkg", "exec", "valid-pkg", "--silent", "--cmd", "echo hello"]
)
assert result.exit_code == 0
assert mock_subprocess.call_count == 1
assert result.output == ""
|
from pathlib import Path
from unittest import mock
from click.testing import CliRunner
from llama_dev.cli import cli
def test_cmd_exec_no_package_no_all_flag():
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "--cmd", "echo hello"])
assert result.exit_code != 0
assert "Either specify a package name or use the --all flag" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
def test_cmd_exec_invalid_package(mock_is_llama_index):
mock_is_llama_index.return_value = False
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "invalid-pkg", "--cmd", "echo hello"])
assert result.exit_code != 0
print(result.output)
assert "not a path to a LlamaIndex package" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.find_all_packages")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_all_flag(mock_subprocess, mock_find_all):
mock_find_all.return_value = [Path("/fake/pkg1"), Path("/fake/pkg2")]
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "--all", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 2
assert "Command succeeded" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_single_package_success(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 1
assert "Command succeeded" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_failure_without_fail_fast(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=1, stdout="", stderr="Error message"
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello"])
assert result.exit_code == 0 # Command should continue despite failure
assert "Command 'echo hello' failed" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_failure_with_fail_fast(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=1, stdout="", stderr="Error message"
)
runner = CliRunner()
result = runner.invoke(
cli, ["pkg", "exec", "valid-pkg", "--cmd", "echo hello", "--fail-fast"]
)
assert result.exit_code != 0 # Command should fail at the first error
assert "Command 'echo hello' failed" in result.output
@mock.patch("llama_dev.pkg.cmd_exec.is_llama_index_package")
@mock.patch("llama_dev.pkg.cmd_exec.subprocess.run")
def test_cmd_exec_multiple_packages(mock_subprocess, mock_is_llama_index):
mock_is_llama_index.return_value = True
mock_subprocess.return_value = mock.Mock(
returncode=0, stdout="Command output", stderr=""
)
runner = CliRunner()
result = runner.invoke(cli, ["pkg", "exec", "pkg1", "pkg2", "--cmd", "echo hello"])
assert result.exit_code == 0
assert mock_subprocess.call_count == 2
assert "Command succeeded" in result.output
|
from typing import Dict, List, Optional, Callable
from jina.importer import ImportExtensions
from jina.types.request.data import DataRequest
from jina import DocumentArray
from jina._docarray import docarray_v2
if docarray_v2:
from docarray import DocList
def get_fastapi_app(
request_models_map: Dict,
caller: Callable,
**kwargs
):
"""
Get the app from FastAPI as the REST interface.
:param request_models_map: Map describing the endpoints and their Pydantic models
:param caller: Callable invoked by the endpoints of the returned FastAPI app
:param kwargs: Extra kwargs to make it compatible with other methods
:return: fastapi app
"""
with ImportExtensions(required=True):
from fastapi import FastAPI, Response, HTTPException
import pydantic
from jina.proto import jina_pb2
app = FastAPI()
def add_route(endpoint_path, input_model, output_model, input_doc_list_model=None, output_doc_list_model=None):
app_kwargs = dict(path=f'/{endpoint_path.strip("/")}',
methods=['POST'],
summary=f'Endpoint {endpoint_path}',
response_model=output_model, )
if docarray_v2:
from docarray.base_doc.docarray_response import DocArrayResponse
app_kwargs['response_class'] = DocArrayResponse
@app.api_route(
**app_kwargs
)
async def post(body: input_model, response: Response):
req = DataRequest()
if not docarray_v2:
req.data.docs = DocumentArray.from_pydantic_model(body.data)
else:
req.data.docs = DocList[input_doc_list_model](body.data)
req.parameters = body.parameters
req.header.exec_endpoint = endpoint_path
resp = await caller(req)
status = resp.header.status
if status.code == jina_pb2.StatusProto.ERROR:
raise HTTPException(status_code=499, detail=status.description)
else:
if not docarray_v2:
docs_response = resp.docs.to_dict()
else:
docs_response = resp.docs
ret = output_model(data=docs_response, parameters=resp.parameters)
return ret
for endpoint, input_output_map in request_models_map.items():
if endpoint != '_jina_dry_run_':
input_doc_model = input_output_map['input']['model']
output_doc_model = input_output_map['output']['model']
endpoint_input_model = pydantic.create_model(
f'{endpoint.strip("/")}_input_model',
data=(List[input_doc_model], []),
parameters=(Optional[Dict], None),
__config__=input_doc_model.__config__
)
endpoint_output_model = pydantic.create_model(
f'{endpoint.strip("/")}_output_model',
data=(List[output_doc_model], []),
parameters=(Optional[Dict], None),
__config__=output_doc_model.__config__
)
add_route(endpoint,
input_model=endpoint_input_model,
output_model=endpoint_output_model,
input_doc_list_model=input_doc_model,
output_doc_list_model=output_doc_model)
from jina.serve.runtimes.gateway.health_model import JinaHealthModel
@app.get(
path='/',
summary='Get the health of Jina Executor service',
response_model=JinaHealthModel,
)
async def _executor_health():
"""
Get the health of this Executor service.
.. # noqa: DAR201
"""
return {}
return app
|
from typing import Dict, List, Optional, Callable
from jina.importer import ImportExtensions
from jina.types.request.data import DataRequest
from jina import DocumentArray
from jina._docarray import docarray_v2
if docarray_v2:
from docarray import DocList
def get_fastapi_app(
request_models_map: Dict,
caller: Callable,
**kwargs
):
"""
Get the app from FastAPI as the REST interface.
:param request_models_map: Map describing the endpoints and their Pydantic models
:param caller: Callable invoked by the endpoints of the returned FastAPI app
:param kwargs: Extra kwargs to make it compatible with other methods
:return: fastapi app
"""
with ImportExtensions(required=True):
from fastapi import FastAPI, Response, HTTPException
import pydantic
from jina.proto import jina_pb2
app = FastAPI()
def add_route(endpoint_path, input_model, output_model, input_doc_list_model=None, output_doc_list_model=None):
app_kwargs = dict(path=f'/{endpoint_path.strip("/")}',
methods=['POST'],
summary=f'Endpoint {endpoint_path}',
response_model=output_model, )
if docarray_v2:
from docarray.base_doc.docarray_response import DocArrayResponse
app_kwargs['response_class'] = DocArrayResponse
@app.api_route(
**app_kwargs
)
async def post(body: input_model, response: Response):
req = DataRequest()
if not docarray_v2:
req.data.docs = DocumentArray.from_pydantic_model(body.data)
else:
req.data.docs = DocList[input_doc_list_model](body.data)
req.parameters = body.parameters
req.header.exec_endpoint = endpoint_path
resp = await caller(req)
status = resp.header.status
if status.code == jina_pb2.StatusProto.ERROR:
raise HTTPException(status_code=499, detail=status.description)
else:
if not docarray_v2:
docs_response = resp.docs.to_dict()
else:
docs_response = resp.docs._data
ret = output_model(data=docs_response, parameters=resp.parameters)
return ret
for endpoint, input_output_map in request_models_map.items():
if endpoint != '_jina_dry_run_':
input_doc_model = input_output_map['input']['model']
output_doc_model = input_output_map['output']['model']
endpoint_input_model = pydantic.create_model(
f'{endpoint.strip("/")}_input_model',
data=(List[input_doc_model], []),
parameters=(Optional[Dict], None),
__config__=input_doc_model.__config__
)
endpoint_output_model = pydantic.create_model(
f'{endpoint.strip("/")}_output_model',
data=(List[output_doc_model], []),
parameters=(Optional[Dict], None),
__config__=output_doc_model.__config__
)
add_route(endpoint,
input_model=endpoint_input_model,
output_model=endpoint_output_model,
input_doc_list_model=input_doc_model,
output_doc_list_model=output_doc_model)
from jina.serve.runtimes.gateway.health_model import JinaHealthModel
@app.get(
path='/',
summary='Get the health of Jina Executor service',
response_model=JinaHealthModel,
)
async def _executor_health():
"""
Get the health of this Executor service.
.. # noqa: DAR201
"""
return {}
return app
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
import torch
import torchvision.models.video as models
from jina import Document, DocumentArray, Executor
from torchvision import transforms
from video_torch_encoder import ConvertFCHWtoCFHW, ConvertFHWCtoFCHW, VideoTorchEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_preprocessing=False, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_preprocessing=False, download_progress=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_preprocessing(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_preprocessing=True, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(
use_preprocessing=True,
model_name=model_name,
download_progress=False,
)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True, progress=False).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
@pytest.mark.gpu
def test_video_torch_encoder_use_preprocessing_gpu(model_name):
ex = VideoTorchEncoder(
model_name=model_name,
use_preprocessing=True,
device='cuda',
download_progress=False,
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
assert ex.device == 'cuda'
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
import torch
import torchvision.models.video as models
from jina import Document, DocumentArray, Executor
from torchvision import transforms
from video_torch_encoder import ConvertFCHWtoCFHW, ConvertFHWCtoFCHW, VideoTorchEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_default_preprocessing=False, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False, download_progress=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_default_preprocessing=True, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(
use_default_preprocessing=True,
model_name=model_name,
download_progress=False,
)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True, progress=False).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
@pytest.mark.gpu
def test_video_torch_encoder_use_default_preprocessing_gpu(model_name):
ex = VideoTorchEncoder(
model_name=model_name,
use_default_preprocessing=True,
device='cuda',
download_progress=False,
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
assert ex.device == 'cuda'
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + top-k retrieval
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
from sentence_transformers import SentenceTransformer, util
import torch
embedder = SentenceTransformer('all-MiniLM-L6-v2')
# Corpus with example sentences
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'The girl is carrying a baby.',
'A man is riding a horse.',
'A woman is playing violin.',
'Two men pushed carts through the woods.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'A cheetah is running behind its prey.'
]
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = ['A man is eating pasta.', 'Someone in a gorilla costume is playing a set of drums.', 'A cheetah chases prey on across a field.']
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0]
top_results = torch.topk(cos_scores, k=top_k)
print("\n\n======================\n\n")
print("Query:", query)
print("\nTop 5 most similar sentences in corpus:")
for score, idx in zip(top_results[0], top_results[1]):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + top-k retrieval
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.5.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.5.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
from typing import Optional
import pytest
import torch
from docarray import BaseDoc, DocList
from docarray.array.any_array import AnyDocArray
from docarray.documents import TextDoc
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDoc):
sub_sub_text: TextDoc
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDoc):
sub_text: TextDoc
sub_da: DocList[SubSubDoc]
class MultiModalDoc(BaseDoc):
mm_text: TextDoc
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocList[SubDoc]
docs = DocList[MultiModalDoc](
[
MultiModalDoc(
mm_text=TextDoc(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=TextDoc(text=f'sub_{i}_1'),
sub_da=DocList[SubSubDoc](
[
SubSubDoc(
sub_sub_text=TextDoc(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text__text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da__sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da__sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da__sub_da__sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocList[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.to_doc_vec()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDoc()
input_list = [DocList([doc, doc, doc])]
flattened = AnyDocArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
from typing import Optional
import pytest
import torch
from docarray import BaseDoc, DocList
from docarray.array.any_array import AnyDocArray
from docarray.documents import TextDoc
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDoc):
sub_sub_text: TextDoc
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDoc):
sub_text: TextDoc
sub_da: DocList[SubSubDoc]
class MultiModalDoc(BaseDoc):
mm_text: TextDoc
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocList[SubDoc]
docs = DocList[MultiModalDoc](
[
MultiModalDoc(
mm_text=TextDoc(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=TextDoc(text=f'sub_{i}_1'),
sub_da=DocList[SubSubDoc](
[
SubSubDoc(
sub_sub_text=TextDoc(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text__text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da__sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da__sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da__sub_da__sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocList[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.stack()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDoc()
input_list = [DocList([doc, doc, doc])]
flattened = AnyDocArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# training schedule for 3x (36 epochs)
train_cfg = dict(max_epochs=36)
# learning rate policy
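# (linear warm-up over the first 500 iterations, then the learning rate is multiplied by 0.1 at epochs 28 and 34)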
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
|
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
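# (step decay at epochs 28 and 34 over 36 epochs; warm-up settings are presumably inherited from the base config)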
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.19.0'
short_version = __version__
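# Example outputs of the parser below: '2.19.0' -> (2, 19, 0); '2.19.0rc1' -> (2, 19, 0, 'rc1')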
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.18.1'
short_version = __version__
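# Example outputs of the parser below: '2.18.1' -> (2, 18, 1); '2.18.1rc1' -> (2, 18, 1, 'rc1')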
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
"""
A quantized model executes some or all of its operations with integers rather than floating point values. This allows for more compact models and the use of high-performance vectorized operations on many hardware platforms.
As a result, you get models that are about 40% smaller and faster. The speed-up depends on your CPU and on how PyTorch was built, and can be anywhere between 10% and 300%.
Note: Quantized models are only available for CPUs. If a GPU is available, use the regular (non-quantized) model on it for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import csv
import gzip
import logging
import os
import time
import torch
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
### /print debug information to stdout
model_name = "all-distilroberta-v1"
# Load a named sentence model (here a DistilRoBERTa-based model). This will download the model if it is not cached locally.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")
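# quantize_dynamic replaces the given module types (here Linear and Embedding) with dynamically
# quantized versions: weights are stored as int8, activations are quantized on the fly at inference time.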
q_model = quantize_dynamic(model, {Linear, Embedding})
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
sentences.append(row["sentence1"])
sentences.append(row["sentence2"])
if row["split"] == "test":
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_normal, len(sentences) / diff_normal))
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_quantized, len(sentences) / diff_quantized))
logging.info("Speed-up: {:.2f}".format(diff_normal / diff_quantized))
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
|
"""
A quantized model executes some or all of its operations with integers rather than floating point values. This allows for more compact models and the use of high-performance vectorized operations on many hardware platforms.
As a result, you get models that are about 40% smaller and faster. The speed-up depends on your CPU and on how PyTorch was built, and can be anywhere between 10% and 300%.
Note: Quantized models are only available for CPUs. If a GPU is available, use the regular (non-quantized) model on it for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import logging
import os
import torch
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
import gzip
import csv
import time
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
### /print debug information to stdout
model_name = "all-distilroberta-v1"
# Load a named sentence model (here a DistilRoBERTa-based model). This will download the model if it is not cached locally.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")
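# quantize_dynamic replaces the given module types (here Linear and Embedding) with dynamically
# quantized versions: weights are stored as int8, activations are quantized on the fly at inference time.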
q_model = quantize_dynamic(model, {Linear, Embedding})
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
sentences.append(row["sentence1"])
sentences.append(row["sentence2"])
if row["split"] == "test":
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_normal, len(sentences) / diff_normal))
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_quantized, len(sentences) / diff_quantized))
logging.info("Speed-up: {:.2f}".format(diff_normal / diff_quantized))
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .utils import download_manager as _deprecated_download_manager
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer(model_name)
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
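# Resulting pipeline: Transformer token embeddings -> CNN over the token sequence -> mean pooling.
# With out_channels=256 and three kernel sizes, the pooled sentence embedding is presumably 3 * 256 = 768-dimensional.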
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="cnn", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-cnn")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-cnn')`."
)
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer(model_name)
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
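# Resulting pipeline: Transformer token embeddings -> CNN over the token sequence -> mean pooling.
# With out_channels=256 and three kernel sizes, the pooled sentence embedding is presumably 3 * 256 = 768-dimensional.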
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="cnn", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-cnn")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-cnn')`."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizer names.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def register_dadaptation_optimizers() -> List[str]:
"""Register optimizers in ``dadaptation`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizer names.
"""
dadaptation_optimizers = []
try:
import dadaptation
except ImportError:
pass
else:
for module_name in ['DAdaptAdaGrad', 'DAdaptAdam', 'DAdaptSGD']:
_optim = getattr(dadaptation, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
dadaptation_optimizers.append(module_name)
return dadaptation_optimizers
DADAPTATION_OPTIMIZERS = register_dadaptation_optimizers()
def register_lion_optimizers() -> List[str]:
"""Register Lion optimizer to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizer names.
"""
optimizers = []
try:
from lion_pytorch import Lion
except ImportError:
pass
else:
OPTIMIZERS.register_module(module=Lion)
optimizers.append('Lion')
return optimizers
LION_OPTIMIZERS = register_lion_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
# Since the current generation of NPU (Ascend 910) only supports
# mixed precision training, we turn on mixed precision by default
# on the NPU so that training runs normally
if is_npu_available():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
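# Illustrative usage sketch (not part of mmengine): build an OptimWrapper for
# a toy model with the default constructor. The SGD settings below are
# arbitrary assumptions, not values required by mmengine.
if __name__ == '__main__':
    toy_model = nn.Linear(2, 2)
    toy_optim_wrapper = build_optim_wrapper(
        toy_model,
        dict(
            type='OptimWrapper',
            optimizer=dict(type='SGD', lr=0.01, momentum=0.9)))
    # The wrapper holds a torch.optim.SGD instance built from the config.
    print(toy_optim_wrapper)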
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizer names.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def register_dadaptation_optimizers() -> List[str]:
"""Register optimizers in ``dadaptation`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizer names.
"""
dadaptation_optimizers = []
try:
import dadaptation
except ImportError:
pass
else:
for module_name in ['DAdaptAdaGrad', 'DAdaptAdam', 'DAdaptSGD']:
_optim = getattr(dadaptation, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
dadaptation_optimizers.append(module_name)
return dadaptation_optimizers
DADAPTATION_OPTIMIZERS = register_dadaptation_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
# Since the current generation of NPU (Ascend 910) only supports
# mixed precision training, we turn on mixed precision by default
# on the NPU so that training runs normally
if is_npu_available():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomErasing, RandomFlip, RandomShift, Resize,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'Resize', 'RandomFlip', 'RandomCrop', 'Normalize',
'SegRescale', 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion',
'Albu', 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut',
'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize', 'Brightness',
'Contrast', 'TranslateX', 'TranslateY', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations',
'Pad', 'GeomTransform', 'ColorTransform', 'RandAugment', 'Sharpness',
'Solarize', 'SolarizeAdd', 'Posterize', 'AutoContrast', 'Invert',
'MultiBranch', 'RandomErasing'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromNDArray', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'Resize', 'RandomFlip', 'RandomCrop', 'Normalize',
'SegRescale', 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion',
'Albu', 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut',
'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize', 'Brightness',
'Contrast', 'TranslateX', 'TranslateY', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations',
'Pad', 'GeomTransform', 'ColorTransform', 'RandAugment', 'Sharpness',
'Solarize', 'SolarizeAdd', 'Posterize', 'AutoContrast', 'Invert',
'MultiBranch'
]
|
from typing import Any # noqa: F401
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # type: Any
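# Combining the metaclasses of TorchTensor and EmbeddingMixin lets the class
# below inherit from both without raising a metaclass conflict.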
class metaTorchAndEmbedding(torch_base, embedding_base):
pass
@_register_proto(proto_type_name='torch_embedding')
class TorchEmbedding(TorchTensor, EmbeddingMixin, metaclass=metaTorchAndEmbedding):
alternative_type = TorchTensor
|
from typing import Any # noqa: F401
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # type: Any
class metaTorchAndEmbedding(torch_base, embedding_base):
pass
class TorchEmbedding(TorchTensor, EmbeddingMixin, metaclass=metaTorchAndEmbedding):
alternative_type = TorchTensor
|
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the dataset of two text columns that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, whether or not a label column is present:
batch = {}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
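# Minimal usage sketch (illustrative): `toy_tokenize` stands in for a real
# tokenizer such as `model.tokenize`; the column names are assumptions that
# match the two-text-plus-score layout described in the docstring above.
if __name__ == "__main__":

    def toy_tokenize(texts: List[str]) -> Dict[str, torch.Tensor]:
        # Pretend every text is exactly three tokens long.
        return {
            "input_ids": torch.ones(len(texts), 3, dtype=torch.long),
            "attention_mask": torch.ones(len(texts), 3, dtype=torch.long),
        }

    collator = SentenceTransformerDataCollator(tokenize_fn=toy_tokenize)
    batch = collator(
        [{"sentence1": "A cat sits.", "sentence2": "A dog runs.", "score": 0.1}]
    )
    # Yields "label" plus "sentence1_input_ids", "sentence1_attention_mask",
    # "sentence2_input_ids" and "sentence2_attention_mask".
    print(sorted(batch.keys()))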
|
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the dataset of two text columns that is used as the example in the training overview:
https://www.sbert.net/docs/training/overview.html
"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, whether or not a label column is present:
batch = {}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
|
from typing import Any, Sequence, Union
from deprecated import deprecated
from llama_index.core.base.llms.generic_utils import (
chat_response_to_completion_response,
stream_chat_response_to_completion_response,
astream_chat_response_to_completion_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
ImageBlock,
TextBlock,
)
from llama_index.core.schema import ImageNode
from llama_index.llms.openai import OpenAI
from llama_index.core.base.llms.generic_utils import image_node_to_image_block
@deprecated(
reason="The package has been deprecated and will no longer be maintained. Please use llama-index-llms-openai (preferably the Responses API) instead. See Multi Modal LLMs documentation for a complete guide on migration: https://docs.llamaindex.ai/en/stable/understanding/using_llms/using_llms/#multi-modal-llms",
version="0.5.2",
)
class OpenAIMultiModal(OpenAI):
@classmethod
def class_name(cls) -> str:
return "openai_multi_modal_llm"
def _get_multi_modal_chat_message(
self,
prompt: str,
role: str,
image_documents: Sequence[Union[ImageBlock, ImageNode]],
) -> ChatMessage:
chat_msg = ChatMessage(role=role, content=prompt)
if not image_documents:
# if image_documents is empty, return text only chat message
return chat_msg
chat_msg.blocks.append(TextBlock(text=prompt))
if all(isinstance(doc, ImageNode) for doc in image_documents):
chat_msg.blocks.extend(
[image_node_to_image_block(doc) for doc in image_documents]
)
else:
chat_msg.blocks.extend(image_documents)
return chat_msg
def complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponse:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = self.chat([chat_message], **kwargs)
return chat_response_to_completion_response(chat_response)
def stream_complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponseGen:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = self.stream_chat([chat_message], **kwargs)
return stream_chat_response_to_completion_response(chat_response)
# ===== Async Endpoints =====
async def acomplete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponse:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = await self.achat([chat_message], **kwargs)
return chat_response_to_completion_response(chat_response)
async def astream_complete(
self,
prompt: str,
image_documents: Sequence[Union[ImageNode, ImageBlock]],
**kwargs: Any,
) -> CompletionResponseAsyncGen:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = await self.astream_chat([chat_message], **kwargs)
return astream_chat_response_to_completion_response(chat_response)
|
from typing import Any, Optional, Sequence
from pathlib import Path
from llama_index.core.base.llms.generic_utils import (
chat_response_to_completion_response,
stream_chat_response_to_completion_response,
astream_chat_response_to_completion_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
ImageBlock,
)
from llama_index.core.schema import ImageNode
from llama_index.llms.openai import OpenAI
class OpenAIMultiModal(OpenAI):
@classmethod
def class_name(cls) -> str:
return "openai_multi_modal_llm"
def _get_multi_modal_chat_message(
self,
prompt: str,
role: str,
image_documents: Sequence[ImageNode],
image_detail: Optional[str] = "low",
**kwargs: Any,
) -> ChatMessage:
chat_msg = ChatMessage(role=role, content=prompt)
if not image_documents:
# if image_documents is empty, return text only chat message
return chat_msg
for image_document in image_documents:
# Create the appropriate ContentBlock depending on the document content
if image_document.image:
chat_msg.blocks.append(
ImageBlock(
image=bytes(image_document.image, encoding="utf-8"),
detail=image_detail,
)
)
elif image_document.image_url:
chat_msg.blocks.append(
ImageBlock(url=image_document.image_url, detail=image_detail)
)
elif image_document.image_path:
chat_msg.blocks.append(
ImageBlock(
path=Path(image_document.image_path),
detail=image_detail,
image_mimetype=image_document.image_mimetype
or image_document.metadata.get("file_type"),
)
)
elif f_path := image_document.metadata.get("file_path"):
chat_msg.blocks.append(
ImageBlock(
path=Path(f_path),
detail=image_detail,
image_mimetype=image_document.metadata.get("file_type"),
)
)
return chat_msg
def complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponse:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = self.chat([chat_message], **kwargs)
return chat_response_to_completion_response(chat_response)
def stream_complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponseGen:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = self.stream_chat([chat_message], **kwargs)
return stream_chat_response_to_completion_response(chat_response)
# ===== Async Endpoints =====
async def acomplete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponse:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = await self.achat([chat_message], **kwargs)
return chat_response_to_completion_response(chat_response)
async def astream_complete(
self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
) -> CompletionResponseAsyncGen:
chat_message = self._get_multi_modal_chat_message(
prompt=prompt,
role=MessageRole.USER,
image_documents=image_documents,
)
chat_response = await self.astream_chat([chat_message], **kwargs)
return astream_chat_response_to_completion_response(chat_response)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
def test_after_iter(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
Runner.param_schedulers = [scheduler]
Hook.after_train_iter(Runner)
scheduler.step.assert_called()
def test_after_epoch(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
Runner.param_schedulers = [scheduler]
Hook.after_train_epoch(Runner)
scheduler.step.assert_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import ParamSchedulerHook
class TestParamSchedulerHook:
def test_after_iter(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = False
Runner.schedulers = [scheduler]
Hook.after_train_iter(Runner)
scheduler.step.assert_called()
def test_after_epoch(self):
Hook = ParamSchedulerHook()
Runner = Mock()
scheduler = Mock()
scheduler.step = Mock()
scheduler.by_epoch = True
Runner.schedulers = [scheduler]
Hook.after_train_epoch(Runner)
scheduler.step.assert_called()
|
"""Ollama specific chat model integration tests"""
from typing import Annotated, Optional
import pytest
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
from langchain_ollama import ChatOllama
DEFAULT_MODEL_NAME = "llama3.1"
@pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")])
def test_structured_output(method: str) -> None:
"""Test to verify structured output via tool calling and ``format`` parameter."""
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
llm = ChatOllama(model=DEFAULT_MODEL_NAME, temperature=0)
query = "Tell me a joke about cats."
# Pydantic
structured_llm = llm.with_structured_output(Joke, method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, Joke)
for chunk in structured_llm.stream(query):
assert isinstance(chunk, Joke)
# JSON Schema
structured_llm = llm.with_structured_output(Joke.model_json_schema(), method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in structured_llm.stream(query):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict)
assert set(chunk.keys()) == {"setup", "punchline"}
# Typed Dict
class JokeSchema(TypedDict):
"""Joke to tell user."""
setup: Annotated[str, "question to set up a joke"]
punchline: Annotated[str, "answer to resolve the joke"]
structured_llm = llm.with_structured_output(JokeSchema, method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in structured_llm.stream(query):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict)
assert set(chunk.keys()) == {"setup", "punchline"}
@pytest.mark.parametrize(("model"), [(DEFAULT_MODEL_NAME)])
def test_structured_output_deeply_nested(model: str) -> None:
"""Test to verify structured output with a nested objects."""
llm = ChatOllama(model=model, temperature=0)
class Person(BaseModel):
"""Information about a person."""
name: Optional[str] = Field(default=None, description="The name of the person")
hair_color: Optional[str] = Field(
default=None, description="The color of the person's hair if known"
)
height_in_meters: Optional[str] = Field(
default=None, description="Height measured in meters"
)
class Data(BaseModel):
"""Extracted data about people."""
people: list[Person]
chat = llm.with_structured_output(Data)
text = (
"Alan Smith is 6 feet tall and has blond hair."
"Alan Poe is 3 feet tall and has grey hair."
)
result = chat.invoke(text)
assert isinstance(result, Data)
for chunk in chat.stream(text):
assert isinstance(chunk, Data)
|
"""Ollama specific chat model integration tests"""
from typing import Annotated, Optional
import pytest
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
from langchain_ollama import ChatOllama
@pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")])
def test_structured_output(method: str) -> None:
"""Test to verify structured output via tool calling and ``format`` parameter."""
class Joke(BaseModel):
"""Joke to tell user."""
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
llm = ChatOllama(model="llama3.1", temperature=0)
query = "Tell me a joke about cats."
# Pydantic
structured_llm = llm.with_structured_output(Joke, method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, Joke)
for chunk in structured_llm.stream(query):
assert isinstance(chunk, Joke)
# JSON Schema
structured_llm = llm.with_structured_output(Joke.model_json_schema(), method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in structured_llm.stream(query):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict) # for mypy
assert set(chunk.keys()) == {"setup", "punchline"}
# Typed Dict
class JokeSchema(TypedDict):
"""Joke to tell user."""
setup: Annotated[str, "question to set up a joke"]
punchline: Annotated[str, "answer to resolve the joke"]
structured_llm = llm.with_structured_output(JokeSchema, method=method) # type: ignore[arg-type]
result = structured_llm.invoke(query)
assert isinstance(result, dict)
assert set(result.keys()) == {"setup", "punchline"}
for chunk in structured_llm.stream(query):
assert isinstance(chunk, dict)
assert isinstance(chunk, dict) # for mypy
assert set(chunk.keys()) == {"setup", "punchline"}
@pytest.mark.parametrize(("model"), [("llama3.1")])
def test_structured_output_deeply_nested(model: str) -> None:
"""Test to verify structured output with a nested objects."""
llm = ChatOllama(model=model, temperature=0)
class Person(BaseModel):
"""Information about a person."""
name: Optional[str] = Field(default=None, description="The name of the person")
hair_color: Optional[str] = Field(
default=None, description="The color of the person's hair if known"
)
height_in_meters: Optional[str] = Field(
default=None, description="Height measured in meters"
)
class Data(BaseModel):
"""Extracted data about people."""
people: list[Person]
chat = llm.with_structured_output(Data) # type: ignore[arg-type]
text = (
"Alan Smith is 6 feet tall and has blond hair."
"Alan Poe is 3 feet tall and has grey hair."
)
result = chat.invoke(text)
assert isinstance(result, Data)
for chunk in chat.stream(text):
assert isinstance(chunk, Data)
|
import os
from abc import ABC, abstractmethod
from typing import List, Optional
import fsspec
from llama_index.core.data_structs.data_structs import IndexStruct
DEFAULT_PERSIST_DIR = "./storage"
DEFAULT_PERSIST_FNAME = "index_store.json"
DEFAULT_PERSIST_PATH = os.path.join(DEFAULT_PERSIST_DIR, DEFAULT_PERSIST_FNAME)
class BaseIndexStore(ABC):
@abstractmethod
def index_structs(self) -> List[IndexStruct]:
pass
@abstractmethod
async def async_index_structs(self) -> List[IndexStruct]:
pass
@abstractmethod
def add_index_struct(self, index_struct: IndexStruct) -> None:
pass
@abstractmethod
async def async_add_index_struct(self, index_struct: IndexStruct) -> None:
pass
@abstractmethod
def delete_index_struct(self, key: str) -> None:
pass
@abstractmethod
async def adelete_index_struct(self, key: str) -> None:
pass
@abstractmethod
def get_index_struct(
self, struct_id: Optional[str] = None
) -> Optional[IndexStruct]:
pass
@abstractmethod
async def aget_index_struct(
self, struct_id: Optional[str] = None
) -> Optional[IndexStruct]:
pass
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the index store to disk."""
|
import os
from abc import ABC, abstractmethod
from typing import List, Optional
import fsspec
from llama_index.core.data_structs.data_structs import IndexStruct
DEFAULT_PERSIST_DIR = "./storage"
DEFAULT_PERSIST_FNAME = "index_store.json"
DEFAULT_PERSIST_PATH = os.path.join(DEFAULT_PERSIST_DIR, DEFAULT_PERSIST_FNAME)
class BaseIndexStore(ABC):
@abstractmethod
def index_structs(self) -> List[IndexStruct]:
pass
@abstractmethod
def add_index_struct(self, index_struct: IndexStruct) -> None:
pass
@abstractmethod
def delete_index_struct(self, key: str) -> None:
pass
@abstractmethod
def get_index_struct(
self, struct_id: Optional[str] = None
) -> Optional[IndexStruct]:
pass
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the index store to disk."""
|
from io import BytesIO
from unittest.mock import MagicMock
from llama_index.core.schema import ImageDocument
from llama_index.multi_modal_llms.gemini.utils import (
generate_gemini_multi_modal_chat_message,
)
def test_generate_message_no_image_documents():
result = generate_gemini_multi_modal_chat_message(
prompt="Hello", role="user", image_documents=None
)
assert result.role == "user"
assert result.content == "Hello"
assert result.additional_kwargs == {}
def test_generate_message_empty_image_documents():
result = generate_gemini_multi_modal_chat_message(
prompt="Hello", role="user", image_documents=[]
)
assert result.role == "user"
assert result.content == "Hello"
assert result.additional_kwargs == {}
def test_generate_message_with_image_documents():
image1 = MagicMock(spec=ImageDocument)
image1.resolve_image.return_value = BytesIO(b"foo")
image2 = MagicMock(spec=ImageDocument)
image2.resolve_image.return_value = BytesIO(b"bar")
image_documents = [image1, image2]
result = generate_gemini_multi_modal_chat_message(
prompt="Hello", role="user", image_documents=image_documents
)
assert result.role == "user"
assert result.content == "Hello"
assert result.additional_kwargs == {"images": image_documents}
|
from unittest.mock import MagicMock
from llama_index.core.schema import ImageDocument
from llama_index.multi_modal_llms.gemini.utils import (
generate_gemini_multi_modal_chat_message,
)
def test_generate_message_no_image_documents():
result = generate_gemini_multi_modal_chat_message(
prompt="Hello", role="user", image_documents=None
)
assert result.role == "user"
assert result.content == "Hello"
assert result.additional_kwargs == {}
def test_generate_message_empty_image_documents():
result = generate_gemini_multi_modal_chat_message(
prompt="Hello", role="user", image_documents=[]
)
assert result.role == "user"
assert result.content == "Hello"
assert result.additional_kwargs == {}
def test_generate_message_with_image_documents():
image1 = MagicMock(spec=ImageDocument)
image2 = MagicMock(spec=ImageDocument)
image_documents = [image1, image2]
result = generate_gemini_multi_modal_chat_message(
prompt="Hello", role="user", image_documents=image_documents
)
assert result.role == "user"
assert result.content == "Hello"
assert result.additional_kwargs == {"images": image_documents}
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, corner2bbox, distance2bbox,
find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes',
'bbox2corner', 'corner2bbox', 'bbox_project'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
distance2bbox, find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes'
]
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def flow():
return Flow().add(uses=TextPaddleEncoder)
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
def validate_callback(mock, validate_func):
for args, kwargs in mock.call_args_list:
validate_func(*args, **kwargs)
mock.assert_called()
@pytest.mark.parametrize(
'parameters',
[
{'traverse_paths': ['r'], 'batch_size': 10},
{'traverse_paths': ['m'], 'batch_size': 10},
{'traverse_paths': ['r', 'c'], 'batch_size': 5},
],
)
def test_text_paddle(flow, content, document_array, parameters):
def validate(resp):
for doc in resp.docs:
assert doc.embedding.shape == (1024,)
assert doc.embedding.all()
with flow as f:
results = f.index(inputs=document_array, return_results=True)
validate(results[0])
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'use_gpu:true',
],
timeout=30,
check=True,
)
|
import pytest
from jina import Document, DocumentArray, Flow
from text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def flow():
return Flow().add(uses=TextPaddleEncoder)
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
def validate_callback(mock, validate_func):
for args, kwargs in mock.call_args_list:
validate_func(*args, **kwargs)
mock.assert_called()
@pytest.mark.parametrize(
'parameters',
[
{'traverse_paths': ['r'], 'batch_size': 10},
{'traverse_paths': ['m'], 'batch_size': 10},
{'traverse_paths': ['r', 'c'], 'batch_size': 5},
],
)
def test_text_paddle(flow, content, document_array, parameters, mocker):
def validate(resp):
for doc in resp.docs:
assert doc.embedding.shape == (1024,)
assert doc.embedding.all()
mock_on_done = mocker.Mock()
with flow as f:
f.index(inputs=document_array, on_done=mock_on_done)
validate_callback(mock_on_done, validate)
|