input (string, lengths 33–5k) | output (string, lengths 32–5k) |
---|---|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
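# Illustrative config (values are placeholders): _init_storage below accepts either an
# AnnliteConfig instance or a plain dict such as {'n_dim': 128, 'metric': 'cosine'},
# which it converts via dataclass_from_dict(AnnliteConfig, config).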
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **config)
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **config)
|
"""Standard LangChain interface tests."""
from langchain_core.embeddings import Embeddings
from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests
from langchain_fireworks import FireworksEmbeddings
class TestFireworksStandard(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return FireworksEmbeddings
@property
def embeddings_params(self) -> dict:
return {"api_key": "test_api_key"}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"FIREWORKS_API_KEY": "api_key",
},
{},
{
"fireworks_api_key": "api_key",
},
)
|
"""Standard LangChain interface tests"""
from langchain_core.embeddings import Embeddings
from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests
from langchain_fireworks import FireworksEmbeddings
class TestFireworksStandard(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return FireworksEmbeddings
@property
def embeddings_params(self) -> dict:
return {"api_key": "test_api_key"}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"FIREWORKS_API_KEY": "api_key",
},
{},
{
"fireworks_api_key": "api_key",
},
)
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
def test_quantile_objective() -> None:
with pytest.raises(ValueError, match="external memory"):
check_quantile_loss_extmem(2, 2, 2, "hist", "cuda")
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("gpu_hist")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
def test_quantile_objective() -> None:
with pytest.raises(ValueError, match="external memory"):
check_quantile_loss_extmem(2, 2, 2, "hist", "cuda")
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. For this example, we use a batch_size of 128, a max sentence length (max_seq_length)
# of 32 word pieces, and roberta-base as the model
model_name = "roberta-base"
batch_size = 128
max_seq_length = 32
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
output_path = "output/askubuntu-simcse-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info(f"{len(train_sentences)} train sentences")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# As the loss function, we use MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Dev performance before training")
dev_evaluator(model)
warmup_steps = int(num_epochs * len(train_dataloader) * 0.1)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=output_path,
show_progress_bar=True,
use_amp=True, # If your GPU does not have FP16 cores, set use_amp=False
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. For this example, we use a batch_size of 128, a max sentence length (max_seq_length)
# of 32 word pieces, and roberta-base as the model
model_name = "roberta-base"
batch_size = 128
max_seq_length = 32
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
output_path = "output/askubuntu-simcse-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the dev/test set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# As the loss function, we use MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Dev performance before training")
dev_evaluator(model)
warmup_steps = int(num_epochs * len(train_dataloader) * 0.1)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=output_path,
show_progress_bar=True,
use_amp=True, # If your GPU does not have FP16 cores, set use_amp=False
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.gel import GelKVStore
class GelDocumentStore(KVDocumentStore):
"""
Gel Document (Node) store.
A Gel store for Document and Node objects.
Args:
gel_kvstore (GelKVStore): Gel key-value store
namespace (str): namespace for the docstore
batch_size (int): batch size for bulk operations
"""
def __init__(
self,
gel_kvstore: GelKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a GelDocumentStore."""
super().__init__(gel_kvstore, namespace=namespace, batch_size=batch_size)
|
from typing import Optional
from llama_index.core.storage.docstore.keyval_docstore import KVDocumentStore
from llama_index.core.storage.docstore.types import DEFAULT_BATCH_SIZE
from llama_index.storage.kvstore.gel import GelKVStore
class GelDocumentStore(KVDocumentStore):
"""Gel Document (Node) store.
A Gel store for Document and Node objects.
Args:
gel_kvstore (GelKVStore): Gel key-value store
namespace (str): namespace for the docstore
batch_size (int): batch size for bulk operations
"""
def __init__(
self,
gel_kvstore: GelKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a GelDocumentStore."""
super().__init__(gel_kvstore, namespace=namespace, batch_size=batch_size)
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
It is called when a tensor is assigned to a field of this type,
i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
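# Sketch of the intended behaviour (MyTensor is a hypothetical concrete subclass):
#   MyTensor[2, 3] accepts a tensor of shape (2, 3) unchanged,
#   reshapes a compatible tensor (e.g. of shape (6,) or (3, 2)) to (2, 3),
#   and raises a ValueError for an incompatible shape such as (7,).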
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
{ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
comp_backend = cls.get_comp_backend()
# at runtime, 'T' is always the correct input type for .stack()
# but mypy doesn't know that, so we ignore it here
return cls.__docarray_from_native__(comp_backend.stack(seq)) # type: ignore
@classmethod
@abc.abstractmethod
def __docarray_from_native__(cls: Type[T], value: Any) -> T:
"""
Create a DocArray tensor from a tensor that is native to the given framework,
e.g. from numpy.ndarray or torch.Tensor.
"""
...
@staticmethod
@abc.abstractmethod
def get_comp_backend() -> Type[AbstractComputationalBackend]:
"""The computational backend compatible with this tensor type."""
...
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
It is called when a tensor is assigned to a field of this type,
i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
{ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
@abc.abstractmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
...
|
from llama_index_instrumentation import (
DispatcherSpanMixin, # noqa
get_dispatcher, # noqa
root_dispatcher, # noqa
root_manager, # noqa
)
from llama_index_instrumentation.dispatcher import (
DISPATCHER_SPAN_DECORATED_ATTR, # noqa
Dispatcher, # noqa
Manager, # noqa
)
from llama_index_instrumentation.event_handlers import NullEventHandler # noqa
from llama_index_instrumentation.span_handlers import NullSpanHandler # noqa
|
import inspect
from abc import ABC
from typing import Any, List
from llama_index.core.instrumentation.dispatcher import (
Dispatcher,
Manager,
DISPATCHER_SPAN_DECORATED_ATTR,
)
from llama_index.core.instrumentation.event_handlers import NullEventHandler
from llama_index.core.instrumentation.span_handlers import NullSpanHandler
root_dispatcher: Dispatcher = Dispatcher(
name="root",
event_handlers=[NullEventHandler()],
span_handlers=[NullSpanHandler()],
propagate=False,
)
root_manager: Manager = Manager(root_dispatcher)
def get_dispatcher(name: str = "root") -> Dispatcher:
"""Module method that should be used for creating a new Dispatcher."""
if name in root_manager.dispatchers:
return root_manager.dispatchers[name]
candidate_parent_name = ".".join(name.split(".")[:-1])
if candidate_parent_name in root_manager.dispatchers:
parent_name = candidate_parent_name
else:
parent_name = "root"
new_dispatcher = Dispatcher(
name=name,
root_name=root_dispatcher.name,
parent_name=parent_name,
manager=root_manager,
)
root_manager.add_dispatcher(new_dispatcher)
return new_dispatcher
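# Illustrative usage (module name is an example): get_dispatcher("llama_index.core.indices")
# returns the existing dispatcher of that name if registered; otherwise it creates one whose
# parent is "llama_index.core" when that dispatcher exists, and "root" otherwise.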
class DispatcherSpanMixin(ABC):
"""
Apply the `dispatcher.span` decorator to implementations of abstract methods, as well
as any methods previously decorated (in any base class) that are being overridden by
a subclass. For example, if class `A` has abstract method `f`, and class `B` inherits
from `A` and provides an implementation of `f`, then `B.f` will be decorated by the mixin.
Furthermore, if `B` has a non-abstract method `g` that is decorated by `dispatcher.span`
and new class `C` inherits from `B` and overrides `g`, then `C.g` will also be decorated
by the mixin. Note that users can still manually apply `dispatcher.span` to the methods
in their custom subclasses without creating duplicate spans because the `dispatcher.span`
decorator should be idempotent.
"""
def __init_subclass__(cls, **kwargs: Any) -> None:
super().__init_subclass__(**kwargs)
abstract_methods: List[str] = []
decorated_methods: List[str] = []
for base_cls in inspect.getmro(cls):
if base_cls is cls:
continue
for attr, method in base_cls.__dict__.items():
if not callable(method):
continue
if (
hasattr(method, "__isabstractmethod__")
and method.__isabstractmethod__
):
abstract_methods.append(attr)
elif hasattr(method, DISPATCHER_SPAN_DECORATED_ATTR):
decorated_methods.append(attr)
dispatcher = get_dispatcher(cls.__module__)
for attr, method in cls.__dict__.items():
if (
not callable(method)
or hasattr(method, "__isabstractmethod__")
and method.__isabstractmethod__
):
continue
if attr in abstract_methods or attr in decorated_methods:
setattr(cls, attr, dispatcher.span(method))
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import RPNHead
class TestRPNHead(TestCase):
def test_init(self):
"""Test init rpn head."""
rpn_head = RPNHead(num_classes=1, in_channels=1)
assert rpn_head.rpn_conv
assert rpn_head.rpn_cls
assert rpn_head.rpn_reg
# rpn_head.num_convs > 1
rpn_head = RPNHead(num_classes=1, in_channels=1, num_convs=2)
assert rpn_head.rpn_conv
assert rpn_head.rpn_cls
assert rpn_head.rpn_reg
def test_rpn_head_loss(self):
"""Tests rpn head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
rpn_head = RPNHead(num_classes=1, in_channels=1, train_cfg=cfg)
# Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(rpn_head.prior_generator.strides)))
cls_scores, bbox_preds = rpn_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = rpn_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_rpn_cls'])
empty_box_loss = sum(empty_gt_losses['loss_rpn_bbox'])
assert empty_cls_loss.item() > 0, 'rpn cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([0])
one_gt_losses = rpn_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_rpn_cls'])
onegt_box_loss = sum(one_gt_losses['loss_rpn_bbox'])
assert onegt_cls_loss.item() > 0, 'rpn cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'rpn box loss should be non-zero'
# When there is no valid anchor, the loss will be None,
# and this will raise a ValueError.
img_metas = [{
'img_shape': (8, 8, 3),
'scale_factor': 1,
}]
with pytest.raises(ValueError):
rpn_head.loss(cls_scores, bbox_preds, [gt_instances], img_metas)
def test_bbox_post_process(self):
"""Test the length of detection instance results is 0."""
from mmengine.config import ConfigDict
cfg = ConfigDict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)
rpn_head = RPNHead(num_classes=1, in_channels=1)
results = InstanceData(metainfo=dict())
results.bboxes = torch.zeros((0, 4))
results.scores = torch.zeros(0)
results = rpn_head._bbox_post_process(results, cfg, img_meta=dict())
assert len(results) == 0
assert results.bboxes.size() == (0, 4)
assert results.scores.size() == (0, )
assert results.labels.size() == (0, )
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import RPNHead
class TestRPNHead(TestCase):
def test_rpn_head_loss(self):
"""Tests rpn head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
cfg = Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
rpn_head = RPNHead(num_classes=1, in_channels=1, train_cfg=cfg)
# Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(rpn_head.prior_generator.strides)))
cls_scores, bbox_preds = rpn_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = rpn_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_rpn_cls'])
empty_box_loss = sum(empty_gt_losses['loss_rpn_bbox'])
assert empty_cls_loss.item() > 0, 'rpn cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([0])
one_gt_losses = rpn_head.loss(cls_scores, bbox_preds, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_rpn_cls'])
onegt_box_loss = sum(one_gt_losses['loss_rpn_bbox'])
assert onegt_cls_loss.item() > 0, 'rpn cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'rpn box loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_global_accsessible import BaseGlobalAccessible, MetaGlobalAccessible
from .log_buffer import LogBuffer
from .logger import MMLogger, print_log
from .message_hub import MessageHub
__all__ = [
'LogBuffer', 'MessageHub', 'MetaGlobalAccessible', 'BaseGlobalAccessible',
'MMLogger', 'print_log'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_global_accsessible import BaseGlobalAccessible, MetaGlobalAccessible
__all__ = ['MetaGlobalAccessible', 'BaseGlobalAccessible']
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
import os
from pathlib import Path
import numpy as np
import pytest
from PIL.Image import Image, fromarray
from jina import DocumentArray, Document, Executor
from ...normalizer import ImageNormalizer
@pytest.fixture
def numpy_image_uri(tmpdir):
blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8')
im = fromarray(blob)
uri = os.path.join(tmpdir, 'tmp.png')
im.save(uri)
return uri
@pytest.fixture
def test_image_uri_doc(numpy_image_uri):
return Document(uri=numpy_image_uri)
@pytest.fixture
def test_image_buffer_doc(numpy_image_uri):
doc = Document(uri=numpy_image_uri)
doc.convert_uri_to_buffer()
return doc
@pytest.fixture
def test_image_blob_doc(numpy_image_uri):
doc = Document(uri=numpy_image_uri)
doc.convert_image_uri_to_blob()
return doc
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.target_size == 224
def test_initialization():
norm = ImageNormalizer()
assert norm.target_size == 224
norm = ImageNormalizer(
target_size=96,
img_mean=(1.0, 2.0, 3.0),
img_std=(2.0, 2.0, 2.0),
resize_dim=256,
channel_axis=4,
target_channel_axis=5,
target_dtype=np.uint8,
)
assert norm.target_size == 96
assert np.array_equal(norm.img_std, [[[2, 2, 2]]])
assert np.array_equal(norm.img_mean, [[[1, 2, 3]]])
assert norm.resize_dim == 256
assert norm.channel_axis == 4
assert norm.target_channel_axis == 5
assert norm.target_dtype == np.uint8
def test_convert_image_to_blob(
test_image_uri_doc, test_image_buffer_doc, test_image_blob_doc
):
norm = ImageNormalizer(
resize_dim=123, img_mean=(0.1, 0.1, 0.1), img_std=(0.5, 0.5, 0.5)
)
docs = DocumentArray(
[test_image_uri_doc, test_image_buffer_doc, test_image_blob_doc]
)
assert docs[0].blob is None and docs[1].blob is None
for doc in docs:
norm._convert_image_to_blob(doc)
assert len(docs) == 3
for doc in docs:
assert np.array_equal(doc.blob, test_image_blob_doc.blob)
@pytest.mark.parametrize('dtype_conversion', [np.uint8, np.float32, np.float64])
@pytest.mark.parametrize('manual_convert', [True, False])
def test_crafting_image(test_image_uri_doc, manual_convert, dtype_conversion):
doc = Document(test_image_uri_doc, copy=True)
doc.convert_image_uri_to_blob()
norm = ImageNormalizer(
resize_dim=123,
img_mean=(0.1, 0.1, 0.1),
img_std=(0.5, 0.5, 0.5),
target_dtype=dtype_conversion,
)
assert norm.target_dtype == dtype_conversion
img = norm._load_image(doc.blob)
assert isinstance(img, Image)
assert img.size == (96, 96)
img_resized = norm._resize_short(img)
assert img_resized.size == (123, 123)
assert isinstance(img_resized, Image)
norm.resize_dim = (123, 456)
img_resized = norm._resize_short(img)
assert img_resized.size == (123, 456)
assert isinstance(img_resized, Image)
with pytest.raises(ValueError):
norm.resize_dim = (1, 2, 3)
norm._resize_short(img)
norm.resize_dim = 256
img = norm._resize_short(img)
norm.target_size = 128
cropped_img, b1, b2 = norm._crop_image(img, how='random')
assert cropped_img.size == (128, 128)
assert isinstance(cropped_img, Image)
norm.target_size = 224
img, b1, b2 = norm._crop_image(img, how='center')
assert img.size == (224, 224)
assert isinstance(img, Image)
assert b1 == 16
assert b2 == 16
img = np.asarray(img).astype('float32') / 255
norm_img = norm._normalize(norm._load_image(doc.blob))
img -= np.array([[[0.1, 0.1, 0.1]]])
img /= np.array([[[0.5, 0.5, 0.5]]])
assert np.array_equal(norm_img, img)
if manual_convert:
docs = DocumentArray([doc])
else:
docs = DocumentArray([test_image_uri_doc])
processed_docs = norm.craft(docs)
assert np.array_equal(processed_docs[0].blob, img.astype(dtype_conversion))
for doc in processed_docs:
assert doc.blob.dtype == dtype_conversion
def test_move_channel_axis(test_image_uri_doc):
norm = ImageNormalizer(channel_axis=2, target_channel_axis=0)
doc = test_image_uri_doc
doc.convert_image_uri_to_blob()
img = norm._load_image(doc.blob)
assert img.size == (96, 96)
channel0_img = norm._move_channel_axis(doc.blob, 2, 0)
assert channel0_img.shape == (3, 96, 96)
processed_docs = norm.craft(DocumentArray([doc]))
assert processed_docs[0].blob.shape == (3, 224, 224)
|
import os
import numpy as np
import pytest
from PIL.Image import Image, fromarray
from jina import DocumentArray, Document
from ...normalizer import ImageNormalizer
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def numpy_image_uri(tmpdir):
blob = np.random.randint(255, size=(96, 96, 3), dtype='uint8')
im = fromarray(blob)
uri = os.path.join(tmpdir, 'tmp.png')
im.save(uri)
return uri
@pytest.fixture
def test_image_uri_doc(numpy_image_uri):
return Document(uri=numpy_image_uri)
@pytest.fixture
def test_image_buffer_doc(numpy_image_uri):
doc = Document(uri=numpy_image_uri)
doc.convert_uri_to_buffer()
return doc
@pytest.fixture
def test_image_blob_doc(numpy_image_uri):
doc = Document(uri=numpy_image_uri)
doc.convert_image_uri_to_blob()
return doc
def test_initialization():
norm = ImageNormalizer()
assert norm.target_size == 224
norm = ImageNormalizer(
target_size=96,
img_mean=(1.0, 2.0, 3.0),
img_std=(2.0, 2.0, 2.0),
resize_dim=256,
channel_axis=4,
target_channel_axis=5,
target_dtype=np.uint8
)
assert norm.target_size == 96
assert np.array_equal(norm.img_std, [[[2, 2, 2]]])
assert np.array_equal(norm.img_mean, [[[1, 2, 3]]])
assert norm.resize_dim == 256
assert norm.channel_axis == 4
assert norm.target_channel_axis == 5
assert norm.target_dtype == np.uint8
def test_convert_image_to_blob(
test_image_uri_doc, test_image_buffer_doc, test_image_blob_doc
):
norm = ImageNormalizer(
resize_dim=123, img_mean=(0.1, 0.1, 0.1), img_std=(0.5, 0.5, 0.5)
)
docs = DocumentArray(
[test_image_uri_doc, test_image_buffer_doc, test_image_blob_doc]
)
assert docs[0].blob is None and docs[1].blob is None
for doc in docs:
norm._convert_image_to_blob(doc)
assert len(docs) == 3
for doc in docs:
assert np.array_equal(doc.blob, test_image_blob_doc.blob)
@pytest.mark.parametrize('dtype_conversion', [np.uint8, np.float32, np.float64])
@pytest.mark.parametrize('manual_convert', [True, False])
def test_crafting_image(test_image_uri_doc, manual_convert, dtype_conversion):
doc = Document(test_image_uri_doc, copy=True)
doc.convert_image_uri_to_blob()
norm = ImageNormalizer(
resize_dim=123, img_mean=(0.1, 0.1, 0.1), img_std=(0.5, 0.5, 0.5), target_dtype=dtype_conversion
)
assert norm.target_dtype == dtype_conversion
img = norm._load_image(doc.blob)
assert isinstance(img, Image)
assert img.size == (96, 96)
img_resized = norm._resize_short(img)
assert img_resized.size == (123, 123)
assert isinstance(img_resized, Image)
norm.resize_dim = (123, 456)
img_resized = norm._resize_short(img)
assert img_resized.size == (123, 456)
assert isinstance(img_resized, Image)
with pytest.raises(ValueError):
norm.resize_dim = (1, 2, 3)
norm._resize_short(img)
norm.resize_dim = 256
img = norm._resize_short(img)
norm.target_size = 128
cropped_img, b1, b2 = norm._crop_image(img, how='random')
assert cropped_img.size == (128, 128)
assert isinstance(cropped_img, Image)
norm.target_size = 224
img, b1, b2 = norm._crop_image(img, how='center')
assert img.size == (224, 224)
assert isinstance(img, Image)
assert b1 == 16
assert b2 == 16
img = np.asarray(img).astype('float32') / 255
norm_img = norm._normalize(norm._load_image(doc.blob))
img -= np.array([[[0.1, 0.1, 0.1]]])
img /= np.array([[[0.5, 0.5, 0.5]]])
assert np.array_equal(norm_img, img)
if manual_convert:
docs = DocumentArray([doc])
else:
docs = DocumentArray([test_image_uri_doc])
processed_docs = norm.craft(docs)
assert np.array_equal(processed_docs[0].blob, img.astype(dtype_conversion))
for doc in processed_docs:
assert doc.blob.dtype == dtype_conversion
def test_move_channel_axis(test_image_uri_doc):
norm = ImageNormalizer(channel_axis=2, target_channel_axis=0)
doc = test_image_uri_doc
doc.convert_image_uri_to_blob()
img = norm._load_image(doc.blob)
assert img.size == (96, 96)
channel0_img = norm._move_channel_axis(doc.blob, 2, 0)
assert channel0_img.shape == (3, 96, 96)
processed_docs = norm.craft(DocumentArray([doc]))
assert processed_docs[0].blob.shape == (3, 224, 224)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
components = np.zeros_like(loss, dtype=np.long)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
paa_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# Use CrossEntropyLoss since Focal Loss is not supported on CPU
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, iou_preds = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
components = np.zeros_like(loss, dtype=np.long)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
paa_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# Use CrossEntropyLoss since Focal Loss is not supported on CPU
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, iou_preds = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
|
"""Simple Reader that loads highlights from Readwise.io."""
import datetime
import json
from typing import List, Optional
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
def _get_readwise_data(api_key: str, updated_after: Optional[datetime.datetime] = None):
"""
Uses Readwise's export API to export all highlights, optionally after a specified date.
See https://readwise.io/api_deets for details.
Args:
api_key (str): Readwise access token, sent in the Authorization header.
updated_after (datetime.datetime): The datetime to load highlights after. Useful for updating indexes over time.
"""
result = []
next_page = None
while True:
response = requests.get(
url="https://readwise.io/api/v2/export/",
params={
"pageCursor": next_page,
"updatedAfter": updated_after.isoformat() if updated_after else None,
},
headers={"Authorization": f"Token {api_key}"},
)
response.raise_for_status()
result.extend(response.json()["results"])
next_page = response.json().get("nextPageCursor")
if not next_page:
break
return result
class ReadwiseReader(BaseReader):
"""
Reader for Readwise highlights.
"""
def __init__(self, api_key: str):
self._api_key = api_key
def load_data(
self,
updated_after: Optional[datetime.datetime] = None,
) -> List[Document]:
"""
Load your Readwise.io highlights.
Args:
updated_after (datetime.datetime): The datetime to load highlights after. Useful for updating indexes over time.
"""
readwise_response = _get_readwise_data(
api_key=self._api_key, updated_after=updated_after
)
return [Document(text=json.dumps(d)) for d in readwise_response]
|
"""Simple Reader that loads highlights from Readwise.io."""
import datetime
import json
from typing import List, Optional
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
def _get_readwise_data(api_key: str, updated_after: Optional[datetime.datetime] = None):
"""
Uses Readwise's export API to export all highlights, optionally after a specified date.
See https://readwise.io/api_deets for details.
Args:
api_key (str): Readwise access token, sent in the Authorization header.
updated_after (datetime.datetime): The datetime to load highlights after. Useful for updating indexes over time.
"""
result = []
next_page = None
while True:
response = requests.get(
url="https://readwise.io/api/v2/export/",
params={
"pageCursor": next_page,
"updatedAfter": updated_after.isoformat() if updated_after else None,
},
headers={"Authorization": f"Token {api_key}"},
)
response.raise_for_status()
result.extend(response.json()["results"])
next_page = response.json().get("nextPageCursor")
if not next_page:
break
return result
class ReadwiseReader(BaseReader):
"""
Reader for Readwise highlights.
"""
def __init__(self, api_key: str):
self._api_key = api_key
def load_data(
self,
updated_after: Optional[datetime.datetime] = None,
) -> List[Document]:
"""
Load your Readwise.io highlights.
Args:
updated_after (datetime.datetime): The datetime to load highlights after. Useful for updating indexes over time.
"""
readwise_response = _get_readwise_data(
api_key=self._api_key, updated_after=updated_after
)
return [Document(text=json.dumps(d)) for d in readwise_response]
|
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.1.0.dev0",
author="Nils Reimers, Tom Aarsen",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.38.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy<2.0.0",
"scikit-learn",
"scipy",
"huggingface-hub>=0.19.3",
"Pillow",
],
extras_require={
"train": [
"datasets",
"accelerate>=0.20.3",
],
"dev": ["datasets", "accelerate>=0.20.3", "pre-commit", "pytest"],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from setuptools import find_packages, setup
with open("README.md", mode="r", encoding="utf-8") as readme_file:
readme = readme_file.read()
setup(
name="sentence-transformers",
version="3.1.0.dev0",
author="Nils Reimers, Tom Aarsen",
author_email="info@nils-reimers.de",
description="Multilingual text embeddings",
long_description=readme,
long_description_content_type="text/markdown",
license="Apache License 2.0",
url="https://www.SBERT.net",
download_url="https://github.com/UKPLab/sentence-transformers/",
packages=find_packages(),
include_package_data=True,
python_requires=">=3.8.0",
install_requires=[
"transformers>=4.38.0,<5.0.0",
"tqdm",
"torch>=1.11.0",
"numpy<2.0.0",
"scikit-learn",
"scipy",
"huggingface-hub>=0.19.3",
"Pillow",
],
extras_require={
"train": [
"datasets",
"accelerate>=0.20.3",
],
"dev": [
"datasets",
"accelerate>=0.20.3",
"pre-commit",
"pytest",
"ruff>=0.3.0",
],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="Transformer Networks BERT XLNet sentence embedding PyTorch NLP deep learning",
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseSparkSQLTool,
InfoSparkSQLTool,
ListSparkSQLTool,
QueryCheckerTool,
QuerySparkSQLTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseSparkSQLTool": "langchain_community.tools",
"QuerySparkSQLTool": "langchain_community.tools",
"InfoSparkSQLTool": "langchain_community.tools",
"ListSparkSQLTool": "langchain_community.tools",
"QueryCheckerTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseSparkSQLTool",
"InfoSparkSQLTool",
"ListSparkSQLTool",
"QueryCheckerTool",
"QuerySparkSQLTool",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import (
BaseSparkSQLTool,
InfoSparkSQLTool,
ListSparkSQLTool,
QueryCheckerTool,
QuerySparkSQLTool,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseSparkSQLTool": "langchain_community.tools",
"QuerySparkSQLTool": "langchain_community.tools",
"InfoSparkSQLTool": "langchain_community.tools",
"ListSparkSQLTool": "langchain_community.tools",
"QueryCheckerTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseSparkSQLTool",
"QuerySparkSQLTool",
"InfoSparkSQLTool",
"ListSparkSQLTool",
"QueryCheckerTool",
]
|
import numpy as np
from absl.testing import parameterized
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding1DTest(testing.TestCase, parameterized.TestCase):
def test_zero_padding_1d(self):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(padding=(1, 2))(inputs)
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, :], inputs)
@parameterized.named_parameters(("one_tuple", (2, 2)), ("one_int", 2))
def test_zero_padding_1d_with_same_padding(self, padding):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(padding=padding)(inputs)
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, :], inputs)
def test_zero_padding_1d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, None, 3))
padded = layers.ZeroPadding1D((1, 2))(input_layer)
self.assertEqual(padded.shape, (1, None, 3))
input_layer = layers.Input(batch_shape=(1, 2, 3))
padded = layers.ZeroPadding1D((1, 2))(input_layer)
self.assertEqual(padded.shape, (1, 5, 3))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2, 3)},
{"padding": "1"},
)
def test_zero_padding_1d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding1D(padding)
def test_zero_padding_1d_get_config(self):
layer = layers.ZeroPadding1D(padding=(1, 2))
expected_config = {
"dtype": dtype_policies.serialize(layer.dtype_policy),
"name": layer.name,
"padding": (1, 2),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
|
import numpy as np
from absl.testing import parameterized
from keras.src import layers
from keras.src import testing
class ZeroPadding1DTest(testing.TestCase, parameterized.TestCase):
def test_zero_padding_1d(self):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(padding=(1, 2))(inputs)
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, :], inputs)
@parameterized.named_parameters(("one_tuple", (2, 2)), ("one_int", 2))
def test_zero_padding_1d_with_same_padding(self, padding):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(padding=padding)(inputs)
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, :], inputs)
def test_zero_padding_1d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, None, 3))
padded = layers.ZeroPadding1D((1, 2))(input_layer)
self.assertEqual(padded.shape, (1, None, 3))
input_layer = layers.Input(batch_shape=(1, 2, 3))
padded = layers.ZeroPadding1D((1, 2))(input_layer)
self.assertEqual(padded.shape, (1, 5, 3))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2, 3)},
{"padding": "1"},
)
def test_zero_padding_1d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding1D(padding)
def test_zero_padding_1d_get_config(self):
layer = layers.ZeroPadding1D(padding=(1, 2))
expected_config = {
"dtype": layer.dtype_policy.name,
"name": layer.name,
"padding": (1, 2),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
|
import gc
import unittest
import torch
from diffusers import (
StableDiffusionInpaintPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionInpaintPipeline
ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt"
original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml"
repo_id = "botp/stable-diffusion-v1-5-inpainting"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_inpaint/input_bench_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_inpaint/input_bench_mask.png"
)
inputs = {
"prompt": "Face of a yellow cat, high resolution, sitting on a park bench",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
def test_single_file_loading_4_channel_unet(self):
# Test loading single file inpaint with a 4 channel UNet
ckpt_path = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
pipe = self.pipeline_class.from_single_file(ckpt_path)
assert pipe.unet.config.in_channels == 4
@unittest.skip("runwayml original config has been removed")
def test_single_file_components_with_original_config(self):
return
@unittest.skip("runwayml original config has been removed")
def test_single_file_components_with_original_config_local_files_only(self):
return
@slow
@require_torch_accelerator
class StableDiffusion21InpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionInpaintPipeline
ckpt_path = (
"https://huggingface.co/stabilityai/stable-diffusion-2-inpainting/blob/main/512-inpainting-ema.safetensors"
)
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inpainting-inference.yaml"
repo_id = "stabilityai/stable-diffusion-2-inpainting"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_inpaint/input_bench_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_inpaint/input_bench_mask.png"
)
inputs = {
"prompt": "Face of a yellow cat, high resolution, sitting on a park bench",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
|
import gc
import unittest
import torch
from diffusers import (
StableDiffusionInpaintPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionInpaintPipeline
ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt"
original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml"
repo_id = "botp/stable-diffusion-v1-5-inpainting"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_inpaint/input_bench_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_inpaint/input_bench_mask.png"
)
inputs = {
"prompt": "Face of a yellow cat, high resolution, sitting on a park bench",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
def test_single_file_loading_4_channel_unet(self):
# Test loading single file inpaint with a 4 channel UNet
ckpt_path = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors"
pipe = self.pipeline_class.from_single_file(ckpt_path)
assert pipe.unet.config.in_channels == 4
@unittest.skip("runwayml original config has been removed")
def test_single_file_components_with_original_config(self):
return
@unittest.skip("runwayml original config has been removed")
def test_single_file_components_with_original_config_local_files_only(self):
return
@slow
@require_torch_gpu
class StableDiffusion21InpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionInpaintPipeline
ckpt_path = (
"https://huggingface.co/stabilityai/stable-diffusion-2-inpainting/blob/main/512-inpainting-ema.safetensors"
)
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inpainting-inference.yaml"
repo_id = "stabilityai/stable-diffusion-2-inpainting"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_inpaint/input_bench_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_inpaint/input_bench_mask.png"
)
inputs = {
"prompt": "Face of a yellow cat, high resolution, sitting on a park bench",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_single_file_format_inference_is_same_as_pretrained(self):
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Tests for async interface."""
import asyncio
import os
import sys
import asynctest
import mmcv
import torch
from mmdet.apis import async_inference_detector, init_detector
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import concurrent
class AsyncTestCase(asynctest.TestCase):
use_default_loop = False
forbid_get_event_loop = True
TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
def _run_test_method(self, method):
result = method()
if asyncio.iscoroutine(result):
self.loop.run_until_complete(
asyncio.wait_for(result, timeout=self.TEST_TIMEOUT))
class MaskRCNNDetector:
def __init__(self,
model_config,
checkpoint=None,
streamqueue_size=3,
device='cuda:0'):
self.streamqueue_size = streamqueue_size
self.device = device
# build the model and load checkpoint
self.model = init_detector(
model_config, checkpoint=None, device=self.device)
self.streamqueue = None
async def init(self):
self.streamqueue = asyncio.Queue()
for _ in range(self.streamqueue_size):
stream = torch.cuda.Stream(device=self.device)
self.streamqueue.put_nowait(stream)
if sys.version_info >= (3, 7):
async def apredict(self, img):
if isinstance(img, str):
img = mmcv.imread(img)
async with concurrent(self.streamqueue):
result = await async_inference_detector(self.model, img)
return result
class AsyncInferenceTestCase(AsyncTestCase):
if sys.version_info >= (3, 7):
async def test_simple_inference(self):
if not torch.cuda.is_available():
import pytest
pytest.skip('test requires GPU and torch+cuda')
ori_grad_enabled = torch.is_grad_enabled()
root_dir = os.path.dirname(os.path.dirname(__name__))
model_config = os.path.join(
root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
detector = MaskRCNNDetector(model_config)
await detector.init()
img_path = os.path.join(root_dir, 'demo/demo.jpg')
bboxes, _ = await detector.apredict(img_path)
self.assertTrue(bboxes)
            # The async inference detector modifies grad_enabled,
            # so restore it here to avoid influencing other tests.
torch.set_grad_enabled(ori_grad_enabled)
|
"""Tests for async interface."""
import asyncio
import os
import sys
import asynctest
import mmcv
import torch
from mmdet.apis import async_inference_detector, init_detector
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import concurrent
class AsyncTestCase(asynctest.TestCase):
use_default_loop = False
forbid_get_event_loop = True
TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
def _run_test_method(self, method):
result = method()
if asyncio.iscoroutine(result):
self.loop.run_until_complete(
asyncio.wait_for(result, timeout=self.TEST_TIMEOUT))
class MaskRCNNDetector:
def __init__(self,
model_config,
checkpoint=None,
streamqueue_size=3,
device='cuda:0'):
self.streamqueue_size = streamqueue_size
self.device = device
# build the model and load checkpoint
self.model = init_detector(
model_config, checkpoint=None, device=self.device)
self.streamqueue = None
async def init(self):
self.streamqueue = asyncio.Queue()
for _ in range(self.streamqueue_size):
stream = torch.cuda.Stream(device=self.device)
self.streamqueue.put_nowait(stream)
if sys.version_info >= (3, 7):
async def apredict(self, img):
if isinstance(img, str):
img = mmcv.imread(img)
async with concurrent(self.streamqueue):
result = await async_inference_detector(self.model, img)
return result
class AsyncInferenceTestCase(AsyncTestCase):
if sys.version_info >= (3, 7):
async def test_simple_inference(self):
if not torch.cuda.is_available():
import pytest
pytest.skip('test requires GPU and torch+cuda')
ori_grad_enabled = torch.is_grad_enabled()
root_dir = os.path.dirname(os.path.dirname(__name__))
model_config = os.path.join(
root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
detector = MaskRCNNDetector(model_config)
await detector.init()
img_path = os.path.join(root_dir, 'demo/demo.jpg')
bboxes, _ = await detector.apredict(img_path)
self.assertTrue(bboxes)
            # The async inference detector modifies grad_enabled,
            # so restore it here to avoid influencing other tests.
torch.set_grad_enabled(ori_grad_enabled)
|
"""
This script contains an example of how to perform semantic search with PyTorch. It performs exact nearest neighbor search.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
As embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned for 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
Google Colab example: https://colab.research.google.com/drive/12cn5Oo0v3HfQQ8Tv6-ukgxXSmT3zl35A?usp=sharing
"""
import csv
import os
import pickle
import time
from sentence_transformers import SentenceTransformer, util
model_name = "quora-distilbert-multilingual"
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
    # Check if the dataset exists. If not, download it.
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
# Move embeddings to the target device of the model
corpus_embeddings = corpus_embeddings.to(model.device)
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings)
end_time = time.time()
hits = hits[0] # Get the hits for the first query
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This script contains an example of how to perform semantic search with PyTorch. It performs exact nearest neighbor search.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
As embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned for 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
Google Colab example: https://colab.research.google.com/drive/12cn5Oo0v3HfQQ8Tv6-ukgxXSmT3zl35A?usp=sharing
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import pickle
import time
model_name = "quora-distilbert-multilingual"
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
    # Check if the dataset exists. If not, download it.
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
# Move embeddings to the target device of the model
corpus_embeddings = corpus_embeddings.to(model.device)
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings)
end_time = time.time()
hits = hits[0] # Get the hits for the first query
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
from typing import Dict
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
    Evaluate a model based on its accuracy on a labeled dataset.
    This requires a model with LossFunction.SOFTMAX.
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
:param dataloader:
the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
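# Hedged usage sketch: this evaluator expects the softmax classification head used
# during training; `train_dataloader` and `num_labels` below are illustrative
# assumptions, with SoftmaxLoss as the usual choice for such a head.
#
#   from sentence_transformers import losses
#   softmax_loss = losses.SoftmaxLoss(
#       model=model,
#       sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
#       num_labels=3,
#   )
#   evaluator = LabelAccuracyEvaluator(train_dataloader, name="dev", softmax_model=softmax_loss)
#   metrics = evaluator(model, output_path="./eval")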
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
    Evaluate a model based on its accuracy on a labeled dataset.
    This requires a model with LossFunction.SOFTMAX.
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
:param dataloader:
the data for the evaluation
"""
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
def __call__(self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
return accuracy
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
from datetime import datetime
from typing import Any, List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from .helpers import ContentSettings
class ExaFindSimilarBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
url: str = SchemaField(
description="The url for which you would like to find similar links"
)
number_of_results: int = SchemaField(
description="Number of results to return",
default=10,
advanced=True,
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default_factory=list,
advanced=True,
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default_factory=list,
advanced=True,
)
start_crawl_date: datetime = SchemaField(
description="Start date for crawled content",
)
end_crawl_date: datetime = SchemaField(
description="End date for crawled content",
)
start_published_date: datetime = SchemaField(
description="Start date for published content",
)
end_published_date: datetime = SchemaField(
description="End date for published content",
)
include_text: List[str] = SchemaField(
description="Text patterns to include (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
contents: ContentSettings = SchemaField(
description="Content retrieval settings",
default=ContentSettings(),
advanced=True,
)
class Output(BlockSchema):
results: List[Any] = SchemaField(
description="List of similar documents with title, URL, published date, author, and score",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="5e7315d1-af61-4a0c-9350-7c868fa7438a",
description="Finds similar links using Exa's findSimilar API",
categories={BlockCategory.SEARCH},
input_schema=ExaFindSimilarBlock.Input,
output_schema=ExaFindSimilarBlock.Output,
)
async def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/findSimilar"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"url": input_data.url,
"numResults": input_data.number_of_results,
"contents": input_data.contents.dict(),
}
optional_field_mapping = {
"include_domains": "includeDomains",
"exclude_domains": "excludeDomains",
"include_text": "includeText",
"exclude_text": "excludeText",
}
# Add optional fields if they have values
for input_field, api_field in optional_field_mapping.items():
value = getattr(input_data, input_field)
if value: # Only add non-empty values
payload[api_field] = value
date_field_mapping = {
"start_crawl_date": "startCrawlDate",
"end_crawl_date": "endCrawlDate",
"start_published_date": "startPublishedDate",
"end_published_date": "endPublishedDate",
}
# Add dates if they exist
for input_field, api_field in date_field_mapping.items():
value = getattr(input_data, input_field, None)
if value:
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
try:
response = await Requests().post(url, headers=headers, json=payload)
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
from datetime import datetime
from typing import Any, List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from .helpers import ContentSettings
class ExaFindSimilarBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
url: str = SchemaField(
description="The url for which you would like to find similar links"
)
number_of_results: int = SchemaField(
description="Number of results to return",
default=10,
advanced=True,
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default_factory=list,
advanced=True,
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default_factory=list,
advanced=True,
)
start_crawl_date: datetime = SchemaField(
description="Start date for crawled content",
)
end_crawl_date: datetime = SchemaField(
description="End date for crawled content",
)
start_published_date: datetime = SchemaField(
description="Start date for published content",
)
end_published_date: datetime = SchemaField(
description="End date for published content",
)
include_text: List[str] = SchemaField(
description="Text patterns to include (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
contents: ContentSettings = SchemaField(
description="Content retrieval settings",
default=ContentSettings(),
advanced=True,
)
class Output(BlockSchema):
results: List[Any] = SchemaField(
description="List of similar documents with title, URL, published date, author, and score",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="5e7315d1-af61-4a0c-9350-7c868fa7438a",
description="Finds similar links using Exa's findSimilar API",
categories={BlockCategory.SEARCH},
input_schema=ExaFindSimilarBlock.Input,
output_schema=ExaFindSimilarBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/findSimilar"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"url": input_data.url,
"numResults": input_data.number_of_results,
"contents": input_data.contents.dict(),
}
optional_field_mapping = {
"include_domains": "includeDomains",
"exclude_domains": "excludeDomains",
"include_text": "includeText",
"exclude_text": "excludeText",
}
# Add optional fields if they have values
for input_field, api_field in optional_field_mapping.items():
value = getattr(input_data, input_field)
if value: # Only add non-empty values
payload[api_field] = value
date_field_mapping = {
"start_crawl_date": "startCrawlDate",
"end_crawl_date": "endCrawlDate",
"start_published_date": "startPublishedDate",
"end_published_date": "endPublishedDate",
}
# Add dates if they exist
for input_field, api_field in date_field_mapping.items():
value = getattr(input_data, input_field, None)
if value:
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
try:
response = Requests().post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
from llama_index.tools.mcp.utils import (
workflow_as_mcp,
get_tools_from_mcp_url,
aget_tools_from_mcp_url,
)
__all__ = [
"McpToolSpec",
"BasicMCPClient",
"workflow_as_mcp",
"get_tools_from_mcp_url",
"aget_tools_from_mcp_url",
]
|
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
from llama_index.tools.mcp.utils import workflow_as_mcp, get_tools_from_mcp_url, aget_tools_from_mcp_url
__all__ = [
"McpToolSpec",
"BasicMCPClient",
"workflow_as_mcp",
"get_tools_from_mcp_url",
"aget_tools_from_mcp_url"
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
import torch_npu.npu.utils as npu_utils
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It's specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def is_npu_support_full_precision() -> bool:
"""Returns True if npu devices support full precision training."""
version_of_support_full_precision = 220
return IS_NPU_AVAILABLE and npu_utils.get_soc_version(
) >= version_of_support_full_precision
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
return DEVICE
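# Hedged usage note: DEVICE is resolved once at import time by the checks above,
# so get_device() simply returns that cached string, e.g.
#
#   assert get_device() in ('npu', 'cuda', 'mlu', 'mps', 'cpu')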
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It's specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
return DEVICE
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.random import seed_generator
class SeedGeneratorTest(testing.TestCase):
def test_seed_generator_initialization(self):
gen = seed_generator.SeedGenerator()
self.assertIsNotNone(gen.state)
seed = 12345
gen = seed_generator.SeedGenerator(seed=seed)
self.assertEqual(ops.convert_to_numpy(gen.state)[0], seed)
with self.assertRaisesRegex(
ValueError, "Argument `seed` must be an integer"
):
seed_generator.SeedGenerator(seed="invalid_seed")
def test_seed_generator_next(self):
gen = seed_generator.SeedGenerator(seed=42)
seed1 = ops.convert_to_numpy(gen.next())
seed2 = ops.convert_to_numpy(gen.next())
self.assertFalse(np.array_equal(seed1, seed2))
def test_global_seed_generator(self):
gen1 = seed_generator.global_seed_generator()
gen2 = seed_generator.global_seed_generator()
self.assertEqual(gen1, gen2)
def test_make_default_seed(self):
seed1 = seed_generator.make_default_seed()
seed2 = seed_generator.make_default_seed()
self.assertNotEqual(seed1, seed2)
def test_seed_generator_dtype(self):
gen = seed_generator.SeedGenerator(seed=42)
self.assertEqual(gen.state.dtype, backend.random_seed_dtype())
seed = gen.next()
self.assertEqual(gen.state.dtype, backend.random_seed_dtype())
self.assertEqual(
backend.standardize_dtype(seed.dtype), backend.random_seed_dtype()
)
def test_draw_seed_from_seed_generator(self):
gen = seed_generator.SeedGenerator(seed=42)
seed1 = seed_generator.draw_seed(gen)
self.assertTrue(backend.is_tensor(seed1))
def test_draw_seed_from_integer(self):
seed2 = seed_generator.draw_seed(12345)
self.assertTrue(backend.is_tensor(seed2))
self.assertEqual(
backend.standardize_dtype(seed2.dtype), backend.random_seed_dtype()
)
def test_draw_seed_from_none(self):
seed3 = seed_generator.draw_seed(None)
self.assertTrue(backend.is_tensor(seed3))
def test_draw_seed_invalid(self):
with self.assertRaisesRegex(
ValueError, "Argument `seed` must be either an integer"
):
seed_generator.draw_seed("invalid_seed")
def test_seed_generator_unexpected_kwargs(self):
with self.assertRaisesRegex(
ValueError, "Unrecognized keyword arguments"
):
seed_generator.SeedGenerator(invalid_arg="unexpected_value")
@pytest.mark.skipif(
backend.backend() != "jax", reason="This test requires the JAX backend"
)
def test_jax_tracing_with_global_seed_generator(self):
import jax
@jax.jit
def traced_function():
return seed_generator.global_seed_generator().next()
with self.assertRaisesRegex(
ValueError,
"When tracing a JAX function, you should only use seeded random",
):
traced_function()
def test_seed_generator_serialization(self):
random_generator = seed_generator.SeedGenerator(seed=42)
self.run_class_serialization_test(random_generator)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.random import seed_generator
class SeedGeneratorTest(testing.TestCase):
def test_seed_generator_initialization(self):
gen = seed_generator.SeedGenerator()
self.assertIsNotNone(gen.state)
seed = 12345
gen = seed_generator.SeedGenerator(seed=seed)
self.assertEqual(ops.convert_to_numpy(gen.state)[0], seed)
with self.assertRaisesRegex(
ValueError, "Argument `seed` must be an integer"
):
seed_generator.SeedGenerator(seed="invalid_seed")
def test_seed_generator_next(self):
gen = seed_generator.SeedGenerator(seed=42)
seed1 = ops.convert_to_numpy(gen.next())
seed2 = ops.convert_to_numpy(gen.next())
self.assertFalse(np.array_equal(seed1, seed2))
def test_global_seed_generator(self):
gen1 = seed_generator.global_seed_generator()
gen2 = seed_generator.global_seed_generator()
self.assertEqual(gen1, gen2)
def test_make_default_seed(self):
seed1 = seed_generator.make_default_seed()
seed2 = seed_generator.make_default_seed()
self.assertNotEqual(seed1, seed2)
def test_draw_seed_from_seed_generator(self):
gen = seed_generator.SeedGenerator(seed=42)
seed1 = seed_generator.draw_seed(gen)
self.assertTrue(backend.is_tensor(seed1))
def test_draw_seed_from_integer(self):
seed2 = seed_generator.draw_seed(12345)
self.assertTrue(backend.is_tensor(seed2))
def test_draw_seed_from_none(self):
seed3 = seed_generator.draw_seed(None)
self.assertTrue(backend.is_tensor(seed3))
def test_draw_seed_invalid(self):
with self.assertRaisesRegex(
ValueError, "Argument `seed` must be either an integer"
):
seed_generator.draw_seed("invalid_seed")
def test_seed_generator_unexpected_kwargs(self):
with self.assertRaisesRegex(
ValueError, "Unrecognized keyword arguments"
):
seed_generator.SeedGenerator(invalid_arg="unexpected_value")
@pytest.mark.skipif(
backend.backend() != "jax", reason="This test requires the JAX backend"
)
def test_jax_tracing_with_global_seed_generator(self):
import jax
@jax.jit
def traced_function():
return seed_generator.global_seed_generator().next()
with self.assertRaisesRegex(
ValueError,
"When tracing a JAX function, you should only use seeded random",
):
traced_function()
def test_seed_generator_serialization(self):
random_generator = seed_generator.SeedGenerator(seed=42)
self.run_class_serialization_test(random_generator)
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
    # Note: this transform assumes that the inputs to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [T.ToDtype(torch.float, scale=True)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
T.ToPureTensor(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ToDtype(torch.float, scale=True)]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
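# Hedged usage sketch for the presets above; the "ssd" policy, the "pil" backend and
# the `pil_image`/`target` names are illustrative assumptions.
#
#   train_tf = DetectionPresetTrain(data_augmentation="ssd", backend="pil", use_v2=False)
#   eval_tf = DetectionPresetEval(backend="pil", use_v2=False)
#   img_t, target_t = train_tf(pil_image, target)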
|
from collections import defaultdict
import torch
import transforms as reference_transforms
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
return torchvision.transforms.v2, torchvision.datapoints
else:
return reference_transforms, None
class DetectionPresetTrain:
    # Note: this transform assumes that the inputs to forward() are always PIL
# images, regardless of the backend parameter.
def __init__(
self,
*,
data_augmentation,
hflip_prob=0.5,
mean=(123.0, 117.0, 104.0),
backend="pil",
use_v2=False,
):
T, datapoints = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if data_augmentation == "hflip":
transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
elif data_augmentation == "lsj":
transforms += [
T.ScaleJitter(target_size=(1024, 1024), antialias=True),
# TODO: FixedSizeCrop below doesn't work on tensors!
reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "multiscale":
transforms += [
T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssd":
fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
transforms += [
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=fill),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
elif data_augmentation == "ssdlite":
transforms += [
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
]
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
if backend == "pil":
# Note: we could just convert to pure tensors even in v2.
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [
T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
T.SanitizeBoundingBoxes(),
T.ToPureTensor(),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self, backend="pil", use_v2=False):
T, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
elif backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
else:
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.ConvertImageDtype(torch.float)]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
import os
from typing import Optional
import fsspec
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.core.storage.index_store.types import (
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
DEFAULT_PERSIST_PATH,
)
from llama_index.core.storage.kvstore.simple_kvstore import SimpleKVStore
from llama_index.core.storage.kvstore.types import BaseInMemoryKVStore
from llama_index.core.utils import concat_dirs
class SimpleIndexStore(KVIndexStore):
"""
Simple in-memory Index store.
Args:
simple_kvstore (SimpleKVStore): simple key-value store
"""
def __init__(
self,
simple_kvstore: Optional[SimpleKVStore] = None,
) -> None:
"""Init a SimpleIndexStore."""
simple_kvstore = simple_kvstore or SimpleKVStore()
super().__init__(simple_kvstore)
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleIndexStore":
"""Create a SimpleIndexStore from a persist directory."""
if fs is not None:
persist_path = concat_dirs(persist_dir, DEFAULT_PERSIST_FNAME)
else:
persist_path = os.path.join(persist_dir, DEFAULT_PERSIST_FNAME)
return cls.from_persist_path(persist_path, fs=fs)
@classmethod
def from_persist_path(
cls,
persist_path: str,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleIndexStore":
"""Create a SimpleIndexStore from a persist path."""
fs = fs or fsspec.filesystem("file")
simple_kvstore = SimpleKVStore.from_persist_path(persist_path, fs=fs)
return cls(simple_kvstore)
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the store."""
if isinstance(self._kvstore, BaseInMemoryKVStore):
self._kvstore.persist(persist_path, fs=fs)
@classmethod
def from_dict(cls, save_dict: dict) -> "SimpleIndexStore":
simple_kvstore = SimpleKVStore.from_dict(save_dict)
return cls(simple_kvstore)
def to_dict(self) -> dict:
assert isinstance(self._kvstore, SimpleKVStore)
return self._kvstore.to_dict()
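# Hedged usage sketch of a persist/reload round trip; the path is an illustrative
# assumption, not a default from this module.
#
#   store = SimpleIndexStore()
#   store.persist(persist_path="./storage/index_store.json")
#   restored = SimpleIndexStore.from_persist_path("./storage/index_store.json")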
|
import os
from typing import Optional
import fsspec
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.core.storage.index_store.types import (
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
DEFAULT_PERSIST_PATH,
)
from llama_index.core.storage.kvstore.simple_kvstore import SimpleKVStore
from llama_index.core.storage.kvstore.types import BaseInMemoryKVStore
from llama_index.core.utils import concat_dirs
class SimpleIndexStore(KVIndexStore):
"""Simple in-memory Index store.
Args:
simple_kvstore (SimpleKVStore): simple key-value store
"""
def __init__(
self,
simple_kvstore: Optional[SimpleKVStore] = None,
) -> None:
"""Init a SimpleIndexStore."""
simple_kvstore = simple_kvstore or SimpleKVStore()
super().__init__(simple_kvstore)
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleIndexStore":
"""Create a SimpleIndexStore from a persist directory."""
if fs is not None:
persist_path = concat_dirs(persist_dir, DEFAULT_PERSIST_FNAME)
else:
persist_path = os.path.join(persist_dir, DEFAULT_PERSIST_FNAME)
return cls.from_persist_path(persist_path, fs=fs)
@classmethod
def from_persist_path(
cls,
persist_path: str,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleIndexStore":
"""Create a SimpleIndexStore from a persist path."""
fs = fs or fsspec.filesystem("file")
simple_kvstore = SimpleKVStore.from_persist_path(persist_path, fs=fs)
return cls(simple_kvstore)
def persist(
self,
persist_path: str = DEFAULT_PERSIST_PATH,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the store."""
if isinstance(self._kvstore, BaseInMemoryKVStore):
self._kvstore.persist(persist_path, fs=fs)
@classmethod
def from_dict(cls, save_dict: dict) -> "SimpleIndexStore":
simple_kvstore = SimpleKVStore.from_dict(save_dict)
return cls(simple_kvstore)
def to_dict(self) -> dict:
assert isinstance(self._kvstore, SimpleKVStore)
return self._kvstore.to_dict()
|
import logging
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
import logging
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.queue import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
from typing import Dict, Optional, Sequence
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPModel, CLIPTokenizer
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using the CLIP model."""
def __init__(
self,
pretrained_model_name_or_path: str = 'openai/clip-vit-base-patch32',
base_tokenizer_model: Optional[str] = None,
max_length: int = 77,
device: str = 'cpu',
traversal_paths: Sequence[str] = ['r'],
batch_size: int = 32,
*args,
**kwargs,
):
"""
:param pretrained_model_name_or_path: Can be either:
- A string, the model id of a pretrained CLIP model hosted
inside a model repo on huggingface.co, e.g., 'openai/clip-vit-base-patch32'
- A path to a directory containing model weights saved, e.g., ./my_model_directory/
:param base_tokenizer_model: Base tokenizer model.
Defaults to ``pretrained_model_name_or_path`` if None
:param max_length: Max length argument for the tokenizer.
All CLIP models use 77 as the max length
:param device: Pytorch device to put the model on, e.g. 'cpu', 'cuda', 'cuda:1'
:param traversal_paths: Default traversal paths for encoding, used if
the traversal path is not passed as a parameter with the request.
:param batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
"""
super().__init__(*args, **kwargs)
self.default_traversal_paths = traversal_paths
self.default_batch_size = batch_size
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.base_tokenizer_model = (
base_tokenizer_model or pretrained_model_name_or_path
)
self.max_length = max_length
self.device = device
self.tokenizer = CLIPTokenizer.from_pretrained(self.base_tokenizer_model)
self.model = CLIPModel.from_pretrained(self.pretrained_model_name_or_path)
self.model.eval().to(device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all documents with the `text` attribute and store the embeddings in the
`embedding` attribute.
:param docs: DocumentArray containing the Documents to be encoded
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
for docs_batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
text_batch = docs_batch.get_attributes('text')
with torch.no_grad():
input_tokens = self._generate_input_tokens(text_batch)
embeddings = self.model.get_text_features(**input_tokens).cpu().numpy()
for doc, embedding in zip(docs_batch, embeddings):
doc.embedding = embedding
def _generate_input_tokens(self, texts: Sequence[str]):
input_tokens = self.tokenizer(
texts,
max_length=self.max_length,
padding='longest',
truncation=True,
return_tensors='pt',
)
input_tokens = {k: v.to(self.device) for k, v in input_tokens.items()}
return input_tokens
|
from typing import Dict, Optional, Sequence
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from transformers import CLIPModel, CLIPTokenizer
class CLIPTextEncoder(Executor):
"""Encode text into embeddings using the CLIP model."""
def __init__(
self,
pretrained_model_name_or_path: str = 'openai/clip-vit-base-patch32',
base_tokenizer_model: Optional[str] = None,
max_length: int = 77,
device: str = 'cpu',
traversal_paths: Sequence[str] = ['r'],
batch_size: int = 32,
*args,
**kwargs,
):
"""
:param pretrained_model_name_or_path: Can be either:
- A string, the model id of a pretrained CLIP model hosted
inside a model repo on huggingface.co, e.g., 'openai/clip-vit-base-patch32'
- A path to a directory containing model weights saved, e.g.
`./my_model_directory/`
:param base_tokenizer_model: Base tokenizer model.
Defaults to ``pretrained_model_name_or_path`` if None
:param max_length: Max length argument for the tokenizer.
All CLIP models use 77 as the max length
:param device: Pytorch device to put the model on, e.g. 'cpu', 'cuda', 'cuda:1'
:param traversal_paths: Default traversal paths for encoding, used if
the traversal path is not passed as a parameter with the request.
:param batch_size: Default batch size for encoding, used if the
batch size is not passed as a parameter with the request.
"""
super().__init__(*args, **kwargs)
self.default_traversal_paths = traversal_paths
self.default_batch_size = batch_size
self.pretrained_model_name_or_path = pretrained_model_name_or_path
self.base_tokenizer_model = (
base_tokenizer_model or pretrained_model_name_or_path
)
self.max_length = max_length
self.device = device
self.tokenizer = CLIPTokenizer.from_pretrained(self.base_tokenizer_model)
self.model = CLIPModel.from_pretrained(self.pretrained_model_name_or_path)
self.model.eval().to(device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all documents with the `text` attribute and store the embeddings in the
`embedding` attribute.
:param docs: DocumentArray containing the Documents to be encoded
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
for docs_batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
text_batch = docs_batch.get_attributes('text')
with torch.no_grad():
input_tokens = self._generate_input_tokens(text_batch)
embeddings = self.model.get_text_features(**input_tokens).cpu().numpy()
for doc, embedding in zip(docs_batch, embeddings):
doc.embedding = embedding
def _generate_input_tokens(self, texts: Sequence[str]):
input_tokens = self.tokenizer(
texts,
max_length=self.max_length,
padding='longest',
truncation=True,
return_tensors='pt',
)
input_tokens = {k: v.to(self.device) for k, v in input_tokens.items()}
return input_tokens
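# --- Hedged usage sketch (not part of the original file) --------------------
# Direct, Flow-less use of the CLIPTextEncoder above; the example texts are
# invented, and the first call downloads the openai/clip-vit-base-patch32
# weights from the Hugging Face Hub.
from jina import Document, DocumentArray

encoder = CLIPTextEncoder(device='cpu')
docs = DocumentArray([Document(text='a photo of a cat'), Document(text='a photo of a dog')])
encoder.encode(docs=docs, parameters={})
print(docs[0].embedding.shape)  # (512,) for the ViT-B/32 text tower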
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
In:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import os
from sentence_transformers import InputExample, SentenceTransformer, datasets, losses, models
train_examples = []
with open("generated_queries.tsv") as fIn:
for line in fIn:
query, paragraph = line.strip().split("\t", maxsplit=1)
train_examples.append(InputExample(texts=[query, paragraph]))
# For the MultipleNegativesRankingLoss, it is important
# that the batch does not contain duplicate entries, i.e.
# no two equal queries and no two equal paragraphs.
# To ensure this, we use a special data loader
train_dataloader = datasets.NoDuplicatesDataLoader(train_examples, batch_size=64)
# Now we create a SentenceTransformer model from scratch
word_emb = models.Transformer("distilbert-base-uncased")
pooling = models.Pooling(word_emb.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_emb, pooling])
# MultipleNegativesRankingLoss requires input pairs (query, relevant_passage)
# and trains the model so that it is suitable for semantic search
train_loss = losses.MultipleNegativesRankingLoss(model)
# Tune the model
num_epochs = 3
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
show_progress_bar=True,
)
os.makedirs("output", exist_ok=True)
model.save("output/programming-model")
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
In:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
from sentence_transformers import SentenceTransformer, InputExample, losses, models, datasets
import os
train_examples = []
with open("generated_queries.tsv") as fIn:
for line in fIn:
query, paragraph = line.strip().split("\t", maxsplit=1)
train_examples.append(InputExample(texts=[query, paragraph]))
# For the MultipleNegativesRankingLoss, it is important
# that the batch does not contain duplicate entries, i.e.
# no two equal queries and no two equal paragraphs.
# To ensure this, we use a special data loader
train_dataloader = datasets.NoDuplicatesDataLoader(train_examples, batch_size=64)
# Now we create a SentenceTransformer model from scratch
word_emb = models.Transformer("distilbert-base-uncased")
pooling = models.Pooling(word_emb.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_emb, pooling])
# MultipleNegativesRankingLoss requires input pairs (query, relevant_passage)
# and trains the model so that it is suitable for semantic search
train_loss = losses.MultipleNegativesRankingLoss(model)
# Tune the model
num_epochs = 3
warmup_steps = int(len(train_dataloader) * num_epochs * 0.1)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
show_progress_bar=True,
)
os.makedirs("output", exist_ok=True)
model.save("output/programming-model")
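# --- Hedged follow-up sketch (not part of the training script) --------------
# Once the script above has produced output/programming-model, the bi-encoder
# can be used for semantic search roughly like this; the two corpus paragraphs
# are invented for illustration.
from sentence_transformers import SentenceTransformer, util

search_model = SentenceTransformer("output/programming-model")
corpus = [
    "NumPy adds support for large, multi-dimensional arrays and matrices to Python.",
    "React is a JavaScript library for building user interfaces.",
]
corpus_emb = search_model.encode(corpus, convert_to_tensor=True)
query_emb = search_model.encode("python library for array math", convert_to_tensor=True)
hits = util.semantic_search(query_emb, corpus_emb, top_k=1)[0]
print(hits[0])  # e.g. {'corpus_id': 0, 'score': ...}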
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
semantic_head=dict(
type='PanopticFPNHead',
num_classes=54,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
panoptic=dict(
score_thr=0.6,
max_per_img=100,
mask_thr_binary=0.5,
mask_overlap=0.5,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
stuff_area_limit=4096)))
custom_hooks = []
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
semantic_head=dict(
type='PanopticFPNHead',
num_classes=54,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
panoptic=dict(
score_thr=0.6,
max_per_img=100,
mask_thr_binary=0.5,
mask_overlap=0.5,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
stuff_area_limit=4096)))
custom_hooks = []
|
import io
import warnings
from abc import ABC
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
        :param format: the image format used to store the image, can be 'PNG', 'JPG', ...
:return: bytes
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import io
import warnings
from abc import ABC
from typing import TYPE_CHECKING
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
        :param format: the image format used to store the image, can be 'PNG', 'JPG', ...
:return: bytes
"""
if TYPE_CHECKING:
from PIL import Image as PILImage
else:
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
if TYPE_CHECKING:
from PIL import Image as PILImage
else:
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
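# --- Hedged usage sketch (not part of the original file) --------------------
# AbstractImageTensor is abstract; in docarray v2 a concrete subclass such as
# ImageNdArray inherits to_bytes(). The 8x8 RGB array below is invented for
# illustration, and Pillow must be installed.
import numpy as np
from pydantic import parse_obj_as
from docarray.typing import ImageNdArray

img = parse_obj_as(ImageNdArray, np.zeros((8, 8, 3), dtype=np.uint8))
png_bytes = img.to_bytes(format='PNG')
print(png_bytes[:8])  # b'\x89PNG\r\n\x1a\n', the PNG magic header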
|
import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: List[str], **kwargs) -> List[int]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json
import logging
import numpy as np
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(self, vocab: List[str], word_weights: Dict[str, float] = {}, unknown_word_weight: float = 1, cumulative_term_frequency: bool = True):
super(BoW, self).__init__()
vocab = list(set(vocab)) #Ensure vocab is unique
self.config_keys = ['vocab', 'word_weights', 'unknown_word_weight', 'cumulative_term_frequency']
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
#Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info("{} out of {} words without a weighting value. Set weight to {}".format(num_unknown_words, len(vocab), unknown_word_weight))
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
#Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: List[str]) -> List[int]:
tokenized = [self.tokenizer.tokenize(text) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = np.zeros(self.get_sentence_embedding_dimension(), dtype=np.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {'sentence_embedding': torch.tensor(vectors, dtype=torch.float)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
return BoW(**config)
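# --- Hedged usage sketch (not part of the original file) --------------------
# The vocabulary, weights and sentence below are invented for illustration.
vocab = ["the", "cat", "sat", "on", "mat"]
bow = BoW(vocab=vocab, word_weights={"the": 0.1}, unknown_word_weight=1.0)
features = bow.tokenize(["the cat sat on the mat"])
# One 5-dim vector per sentence; with cumulative_term_frequency=True the entry
# for "the" accumulates 0.1 + 0.1 = 0.2, all other words get weight 1.0.
print(features["sentence_embedding"].shape)  # torch.Size([1, 5])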
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
):
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_valid_text_extension = ext in TEXT_FILE_FORMATS or ext == ''
if not has_valid_text_extension:
raise ValueError('Text URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
---
```python
from docarray import BaseDoc
from docarray.typing import TextUrl
class MyDoc(BaseDoc):
remote_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
)
remote_txt = doc.remote_url.load()
```
---
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
):
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_valid_text_extension = ext in TEXT_FILE_FORMATS or ext == ''
if not has_valid_text_extension:
raise ValueError('Text URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
from docarray.typing import TextUrl
class MyDoc(BaseDoc):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster'
]
|
import math
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck
from torchaudio_unittest.common_utils import TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
numel = math.prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
def test_filter_waveform(self):
waveform = torch.rand(3, 1, 2, 10, device=self.device, dtype=self.dtype, requires_grad=True)
filters = torch.rand(3, 2, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.filter_waveform, (waveform, filters))
def test_exp_sigmoid_input(self):
input = torch.linspace(-5, 5, 20, device=self.device, dtype=self.dtype, requires_grad=True)
exponent = 10.0
max_value = 2.0
threshold = 1e-7
assert gradcheck(F.exp_sigmoid, (input, exponent, max_value, threshold))
|
import math
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck
from torchaudio_unittest.common_utils import TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
numel = math.prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_freq_ir(self):
mags = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.frequency_impulse_response, (mags,))
def test_filter_waveform(self):
waveform = torch.rand(3, 1, 2, 10, device=self.device, dtype=self.dtype, requires_grad=True)
filters = torch.rand(3, 2, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.filter_waveform, (waveform, filters))
|
_base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from __future__ import annotations
import functools
import operator
from typing import Any, TYPE_CHECKING
import torch
# NOTE: other files rely on the imports below
from torch._dynamo import callback as compilation_callback # noqa: F401
from torch._inductor.runtime.cache_dir_utils import ( # noqa: F401
cache_dir,
default_cache_dir,
triton_cache_dir,
)
if TYPE_CHECKING:
from collections.abc import Hashable
from .triton_compat import Config
def conditional_product(*args: int) -> int:
return functools.reduce(operator.mul, [x for x in args if x])
def ceildiv(number: int, denom: int) -> int:
return -(number // -denom)
def is_power_of_2(n: int) -> bool:
"""Returns whether n = 2 ** m for some integer m."""
return n > 0 and n & n - 1 == 0
def next_power_of_2(n: int) -> int:
"""Return the smallest power of 2 greater than or equal to n"""
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n |= n >> 32
n += 1
return n
def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int:
"""
    Return the total number of bytes the tensor-type arguments take.
    For in/out args, tensor sizes are counted twice: once for reading and
    once for writing.
    The first num_in_out_args arguments are in/out tensors.
"""
return sum(
arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args))
for i, arg in enumerate(args)
if isinstance(arg, torch.Tensor)
)
def triton_config_to_hashable(cfg: Config) -> Hashable:
"""
Convert triton config to a tuple that can uniquely identify it. We can use
the return value as a dictionary key.
"""
items = sorted(cfg.kwargs.items())
items.append(("num_warps", cfg.num_warps))
items.append(("num_stages", cfg.num_stages))
return tuple(items)
def validate_triton_config(cfg: Config) -> None:
# [Note: Triton pre_hook in inductor]
# pre-hook is a lambda function, which we don't attempt to serialize.
# right now, if a pre-hook is attached to the config, it will not be saved;
# and then it won't be used when the config is loaded from cache.
# So we assert - if we do get a pre_hook, it might get ignored after caching.
assert getattr(cfg, "pre_hook", None) is None, (
"triton configs with pre_hooks not supported"
)
def create_bandwidth_info_str(
ms: float,
num_gb: float,
gb_per_s: float,
prefix: str = "",
suffix: str = "",
color: bool = True,
) -> str:
info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}"
slow = ms > 0.012 and gb_per_s < 650
return red_text(info_str) if color and slow else info_str
def get_max_y_grid() -> int:
return 65535
try:
import colorama
HAS_COLORAMA = True
except ModuleNotFoundError:
HAS_COLORAMA = False
colorama = None # type: ignore[assignment]
if HAS_COLORAMA:
def _color_text(msg: str, color: str) -> str:
return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET
else:
def _color_text(msg: str, color: str) -> str:
return msg
def green_text(msg: str) -> str:
return _color_text(msg, "green")
def yellow_text(msg: str) -> str:
return _color_text(msg, "yellow")
def red_text(msg: str) -> str:
return _color_text(msg, "red")
def blue_text(msg: str) -> str:
return _color_text(msg, "blue")
def get_first_attr(obj: Any, *attrs: str) -> Any:
"""
Return the first available attribute or throw an exception if none is present.
"""
for attr in attrs:
if hasattr(obj, attr):
return getattr(obj, attr)
    raise AssertionError(f"{obj} does not have any of the attributes: {attrs}")
dynamo_timed = torch._dynamo.utils.dynamo_timed # type: ignore[has-type]
def triton_hash_to_path_key(key: str) -> str:
# In early versions of Triton, the hash is directly used in the path name.
# Later, the hash is converted to base64 before being used in the path name.
    # Later, the base64 conversion was replaced with base32
#
# This code tries to import _base64 and falls back to _base32 if _base64 is unavailable.
#
# To handle this, try to import the to-base64-conversion function.
# If it exists, use it; otherwise, try using _base32; if both are unavailable, use the hash directly.
try:
from triton.runtime.cache import _base64
return _base64(key)
except Exception:
try:
from triton.runtime.cache import _base32
return _base32(key)
except Exception:
return key
def compile_mps_shader(source: str) -> Any:
"""
    Compiles shader source but raises a more actionable error message when needed
"""
try:
return torch.mps.compile_shader(source)
except SyntaxError as err:
raise SyntaxError(f"failed to compile {source} with {err.msg}") from err
|
from __future__ import annotations
import functools
import operator
from typing import Any, TYPE_CHECKING
import torch
# NOTE: other files rely on the imports below
from torch._dynamo import callback as compilation_callback # noqa: F401
from torch._inductor.runtime.cache_dir_utils import ( # noqa: F401
cache_dir,
default_cache_dir,
triton_cache_dir,
)
if TYPE_CHECKING:
from collections.abc import Hashable
from .triton_compat import Config
def conditional_product(*args: int) -> int:
return functools.reduce(operator.mul, [x for x in args if x])
def ceildiv(numer: int, denom: int) -> int:
return -(numer // -denom)
def is_power_of_2(n: int) -> bool:
"""Returns whether n = 2 ** m for some integer m."""
return n > 0 and n & n - 1 == 0
def next_power_of_2(n: int) -> int:
"""Return the smallest power of 2 greater than or equal to n"""
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n |= n >> 32
n += 1
return n
def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int:
"""
    Return the total number of bytes the tensor-type arguments take.
    For in/out args, tensor sizes are counted twice: once for reading and
    once for writing.
    The first num_in_out_args arguments are in/out tensors.
"""
return sum(
arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args))
for i, arg in enumerate(args)
if isinstance(arg, torch.Tensor)
)
def triton_config_to_hashable(cfg: Config) -> Hashable:
"""
Convert triton config to a tuple that can uniquely identify it. We can use
the return value as a dictionary key.
"""
items = sorted(cfg.kwargs.items())
items.append(("num_warps", cfg.num_warps))
items.append(("num_stages", cfg.num_stages))
return tuple(items)
def validate_triton_config(cfg: Config) -> None:
# [Note: Triton pre_hook in inductor]
# pre-hook is a lambda function, which we don't attempt to serialize.
# right now, if a pre-hook is attached to the config, it will not be saved;
# and then it won't be used when the config is loaded from cache.
# So we assert - if we do get a pre_hook, it might get ignored after caching.
assert getattr(cfg, "pre_hook", None) is None, (
"triton configs with pre_hooks not supported"
)
def create_bandwidth_info_str(
ms: float,
num_gb: float,
gb_per_s: float,
prefix: str = "",
suffix: str = "",
color: bool = True,
) -> str:
info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}"
slow = ms > 0.012 and gb_per_s < 650
return red_text(info_str) if color and slow else info_str
def get_max_y_grid() -> int:
return 65535
try:
import colorama
HAS_COLORAMA = True
except ModuleNotFoundError:
HAS_COLORAMA = False
colorama = None # type: ignore[assignment]
if HAS_COLORAMA:
def _color_text(msg: str, color: str) -> str:
return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET
else:
def _color_text(msg: str, color: str) -> str:
return msg
def green_text(msg: str) -> str:
return _color_text(msg, "green")
def yellow_text(msg: str) -> str:
return _color_text(msg, "yellow")
def red_text(msg: str) -> str:
return _color_text(msg, "red")
def blue_text(msg: str) -> str:
return _color_text(msg, "blue")
def get_first_attr(obj: Any, *attrs: str) -> Any:
"""
Return the first available attribute or throw an exception if none is present.
"""
for attr in attrs:
if hasattr(obj, attr):
return getattr(obj, attr)
    raise AssertionError(f"{obj} does not have any of the attributes: {attrs}")
dynamo_timed = torch._dynamo.utils.dynamo_timed # type: ignore[has-type]
def triton_hash_to_path_key(key: str) -> str:
# In early versions of Triton, the hash is directly used in the path name.
# Later, the hash is converted to base64 before being used in the path name.
    # Later, the base64 conversion was replaced with base32
#
# This code tries to import _base64 and falls back to _base32 if _base64 is unavailable.
#
# To handle this, try to import the to-base64-conversion function.
# If it exists, use it; otherwise, try using _base32; if both are unavailable, use the hash directly.
try:
from triton.runtime.cache import _base64
return _base64(key)
except Exception:
try:
from triton.runtime.cache import _base32
return _base32(key)
except Exception:
return key
def compile_mps_shader(source: str) -> Any:
"""
    Compiles shader source but raises a more actionable error message when needed
"""
try:
return torch.mps.compile_shader(source)
except SyntaxError as err:
raise SyntaxError(f"failed to compile {source} with {err.msg}") from err
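# --- Hedged sanity-check sketch (not part of the original file) -------------
# Worked values for the small integer helpers defined above.
assert ceildiv(10, 3) == 4                  # round-up integer division
assert is_power_of_2(64) and not is_power_of_2(48)
assert next_power_of_2(33) == 64            # smallest power of two >= 33
assert conditional_product(2, 0, 3) == 6    # zero-valued dims are skipped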
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5))
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(type='AmpOptimWrapper', optimizer=dict(lr=0.01))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
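# --- Hedged usage note (not part of the original config) --------------------
# A config like this is normally launched through MMDetection's training entry
# point; the config path below is illustrative, not the repository's real name:
#   python tools/train.py configs/lad/<this_config>.py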
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SendMessage
from langchain_community.tools.office365.send_message import SendMessageSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SendMessageSchema": "langchain_community.tools.office365.send_message",
"O365SendMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365SendMessage",
"SendMessageSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SendMessage
from langchain_community.tools.office365.send_message import SendMessageSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SendMessageSchema": "langchain_community.tools.office365.send_message",
"O365SendMessage": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SendMessageSchema",
"O365SendMessage",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestMaskScoringRoiHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'ms_rcnn/ms-rcnn_r50_fpn_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
self.assertTrue(roi_head.with_mask)
self.assertTrue(roi_head.mask_iou_head)
def test_mask_scoring_roi_head_loss(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
out = roi_head.loss(feats, proposals_list, batch_data_samples)
loss_cls = out['loss_cls']
loss_bbox = out['loss_bbox']
loss_mask = out['loss_mask']
self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
self.assertGreater(loss_mask.sum(), 0, 'mask loss should be non-zero')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
out = roi_head.loss(feats, proposals_list, batch_data_samples)
empty_cls_loss = out['loss_cls']
empty_bbox_loss = out['loss_bbox']
empty_mask_loss = out['loss_mask']
self.assertGreater(empty_cls_loss.sum(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_bbox_loss.sum(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_mask_loss.sum(), 0,
'there should be no mask loss when there are no true boxes')
def test_mask_scoring_roi_head_predict(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
roi_head.predict(feats, proposals_list, batch_data_samples)
def test_mask_scoring_roi_head_forward(self):
"""Tests trident roi head forward."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
roi_head.forward(feats, proposals_list)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
from mmdet.utils import register_all_modules
class TestMaskScoringRoiHead(TestCase):
def setUp(self):
register_all_modules()
self.roi_head_cfg = get_roi_head_cfg(
'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py')
def test_init(self):
roi_head = MODELS.build(self.roi_head_cfg)
self.assertTrue(roi_head.with_bbox)
self.assertTrue(roi_head.with_mask)
self.assertTrue(roi_head.mask_iou_head)
def test_mask_scoring_roi_head_loss(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
out = roi_head.loss(feats, proposals_list, batch_data_samples)
loss_cls = out['loss_cls']
loss_bbox = out['loss_bbox']
loss_mask = out['loss_mask']
self.assertGreater(loss_cls.sum(), 0, 'cls loss should be non-zero')
self.assertGreater(loss_bbox.sum(), 0, 'box loss should be non-zero')
self.assertGreater(loss_mask.sum(), 0, 'mask loss should be non-zero')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
out = roi_head.loss(feats, proposals_list, batch_data_samples)
empty_cls_loss = out['loss_cls']
empty_bbox_loss = out['loss_bbox']
empty_mask_loss = out['loss_mask']
self.assertGreater(empty_cls_loss.sum(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_bbox_loss.sum(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_mask_loss.sum(), 0,
'there should be no mask loss when there are no true boxes')
def test_mask_scoring_roi_head_predict(self):
"""Tests trident roi head predict."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=image_shapes,
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
roi_head.predict(feats, proposals_list, batch_data_samples)
def test_mask_scoring_roi_head_forward(self):
"""Tests trident roi head forward."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
roi_head = MODELS.build(self.roi_head_cfg)
roi_head = roi_head.cuda()
s = 256
feats = []
for i in range(len(roi_head.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
image_shapes = [(3, s, s)]
proposals_list = demo_mm_proposals(
image_shapes=image_shapes, num_proposals=100, device='cuda')
roi_head.forward(feats, proposals_list)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchResize,
BatchSyncRandomResize, BoxInstDataPreprocessor,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
from .reid_data_preprocessor import ReIDDataPreprocessor
from .track_data_preprocessor import TrackDataPreprocessor
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor', 'BatchResize', 'BoxInstDataPreprocessor',
'TrackDataPreprocessor', 'ReIDDataPreprocessor'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchResize,
BatchSyncRandomResize, BoxInstDataPreprocessor,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor', 'BatchResize', 'BoxInstDataPreprocessor'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.boston_housing import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.boston_housing import load_data
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
ControlNetModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
enable_full_determinism()
@slow
@require_torch_accelerator
class ControlNetModelSingleFileTests(unittest.TestCase):
model_class = ControlNetModel
ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
repo_id = "lllyasviel/control_v11p_sd15_canny"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path)
assert model_default.config.upcast_attention is False
assert model_default.dtype == torch.float32
torch_dtype = torch.float16
upcast_attention = True
model = self.model_class.from_single_file(
self.ckpt_path,
upcast_attention=upcast_attention,
torch_dtype=torch_dtype,
)
assert model.config.upcast_attention == upcast_attention
assert model.dtype == torch_dtype
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
ControlNetModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
require_torch_gpu,
slow,
)
enable_full_determinism()
@slow
@require_torch_gpu
class ControlNetModelSingleFileTests(unittest.TestCase):
model_class = ControlNetModel
ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
repo_id = "lllyasviel/control_v11p_sd15_canny"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path)
assert model_default.config.upcast_attention is False
assert model_default.dtype == torch.float32
torch_dtype = torch.float16
upcast_attention = True
model = self.model_class.from_single_file(
self.ckpt_path,
upcast_attention=upcast_attention,
torch_dtype=torch_dtype,
)
assert model.config.upcast_attention == upcast_attention
assert model.dtype == torch_dtype
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.content_blocks import (
convert_to_openai_image_block,
is_data_content_block,
)
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = [
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_openai_image_block",
"convert_to_messages",
"get_buffer_string",
"is_data_content_block",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
]
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_image_block": "content_blocks",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"is_data_content_block": "content_blocks",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
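# Illustrative note (not part of the original file): with the lazy machinery above,
# `from langchain_core.messages import HumanMessage` does not eagerly import every
# submodule. __getattr__ looks up "HumanMessage" in _dynamic_imports, imports the
# ".human" submodule on first access, caches the attribute in globals(), and returns
# it, so subsequent lookups are plain attribute access.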
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = [
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_messages",
"get_buffer_string",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
]
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_bridgetower import *
from .image_processing_bridgetower import *
from .image_processing_bridgetower_fast import *
from .modeling_bridgetower import *
from .processing_bridgetower import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_bridgetower import *
from .image_processing_bridgetower import *
from .modeling_bridgetower import *
from .processing_bridgetower import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
import os
import json
import time
import pytest
from urllib import request
from jina import Flow
from docarray import Document
from jina import helper
from jina import Executor, requests
from tests import validate_callback
cur_dir = os.path.dirname(os.path.abspath(__file__))
# check if this can be bypassed
IGNORED_FIELDS = ['embedding', 'scores', 'graphInfo', 'evaluations']
@pytest.fixture
def docs():
return [Document(id=f'{idx}', text=f'doc{idx}') for idx in range(10)]
def test_no_matches_grpc(mocker, docs):
def validate_response(resp):
for doc in resp.data.docs:
assert len(doc.matches) == 0
mock_on_done = mocker.Mock()
with Flow().add() as f:
f.search(inputs=docs, on_done=mock_on_done)
validate_callback(mock_on_done, validate_response)
@pytest.fixture
def query_dict():
return {'top_k': 3, 'mode': 'search', 'data': [{'text': 'query'}]}
class MockExecutor(Executor):
@requests
def foo(self, docs, *args, **kwargs):
for doc in docs:
doc.tags['tag'] = 'test'
def test_no_matches_rest(query_dict):
port = helper.random_port()
with Flow(
protocol='http',
port=port,
).add(uses=MockExecutor):
# temporarily adding sleep
time.sleep(0.5)
query = json.dumps(query_dict).encode('utf-8')
req = request.Request(
f'http://localhost:{port}/search',
data=query,
headers={'content-type': 'application/json'},
)
resp = request.urlopen(req).read().decode('utf8')
doc = json.loads(resp)['data'][0]
assert len(Document.from_dict(doc).matches) == 0
assert Document.from_dict(doc).tags['tag'] == 'test'
|
import os
import json
import time
import pytest
from urllib import request
from jina import Flow
from docarray import Document
from jina import helper
from jina import Executor, requests
from tests import validate_callback
cur_dir = os.path.dirname(os.path.abspath(__file__))
# check if this can be bypassed
IGNORED_FIELDS = ['embedding', 'scores', 'graphInfo', 'evaluations']
@pytest.fixture
def docs():
return [Document(id=f'{idx}', text=f'doc{idx}') for idx in range(10)]
def test_no_matches_grpc(mocker, docs):
def validate_response(resp):
for doc in resp.data.docs:
assert len(doc.matches) == 0
mock_on_done = mocker.Mock()
with Flow().add() as f:
f.search(inputs=docs, on_done=mock_on_done)
validate_callback(mock_on_done, validate_response)
@pytest.fixture
def query_dict():
return {'top_k': 3, 'mode': 'search', 'data': [{'text': 'query'}]}
class MockExecutor(Executor):
@requests
def foo(self, docs, *args, **kwargs):
for doc in docs:
doc.tags['tag'] = 'test'
def test_no_matches_rest(query_dict):
port = helper.random_port()
with Flow(
protocol='http',
port=port,
including_default_value_fields=True,
).add(uses=MockExecutor):
# temporarily adding sleep
time.sleep(0.5)
query = json.dumps(query_dict).encode('utf-8')
req = request.Request(
f'http://localhost:{port}/search',
data=query,
headers={'content-type': 'application/json'},
)
resp = request.urlopen(req).read().decode('utf8')
doc = json.loads(resp)['data'][0]
assert len(Document.from_dict(doc).matches) == 0
assert Document.from_dict(doc).tags['tag'] == 'test'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_mlu_available)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import get_max_cuda_memory
__all__ = ['get_max_cuda_memory']
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.visualizer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = BaseDataElement(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = BaseDataElement(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = BaseDataElement(
metainfo=dict(
scale=(15, 15), ori_height=5, ori_width=5, img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = BaseDataElement(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = BaseDataElement(
metainfo=dict(ori_height=15, ori_width=15, img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.writer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = [
BaseDataElement(
metainfo=dict(
scale=(15, 15),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = [
BaseDataElement(
metainfo=dict(ori_height=15, ori_width=15,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
__version__ = "3.0.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .similarity_functions import SimilarityFunction
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .trainer import SentenceTransformerTrainer
from .training_args import SentenceTransformerTrainingArguments
from .model_card import SentenceTransformerModelCardData
from .quantization import quantize_embeddings
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
__version__ = "2.8.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .trainer import SentenceTransformerTrainer
from .training_args import SentenceTransformerTrainingArguments
from .model_card import SentenceTransformerModelCardData
from .quantization import quantize_embeddings
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
# mypy: allow-untyped-defs
"""List of Python standard library modules.
Sadly, there is no reliable way to tell whether a module is part of the
standard library except by comparing to a canonical list.
This is taken from https://github.com/PyCQA/isort/tree/develop/isort/stdlibs,
which itself is sourced from the Python documentation.
"""
import sys
def is_stdlib_module(module: str) -> bool:
base_module = module.partition(".")[0]
return base_module in _get_stdlib_modules()
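# Illustrative behaviour (comment added for clarity, not part of the original file),
# assuming Python >= 3.9:
#   is_stdlib_module("os.path")  -> True   (base module "os" is in the stdlib set)
#   is_stdlib_module("torch")    -> False  (third-party packages are not listed)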
def _get_stdlib_modules():
if sys.version_info.major == 3: # noqa: UP036
if sys.version_info.minor == 9:
return stdlib3_9
if sys.version_info.minor >= 10: # noqa: YTT204
return sys.stdlib_module_names # type: ignore[attr-defined]
elif sys.version_info.major > 3: # noqa: UP036
return sys.stdlib_module_names # type: ignore[attr-defined]
raise RuntimeError(f"Unsupported Python version: {sys.version_info}")
stdlib3_9 = {
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"graphlib",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
"zoneinfo",
}
|
# mypy: allow-untyped-defs
"""List of Python standard library modules.
Sadly, there is no reliable way to tell whether a module is part of the
standard library except by comparing to a canonical list.
This is taken from https://github.com/PyCQA/isort/tree/develop/isort/stdlibs,
which itself is sourced from the Python documentation.
"""
import sys
def is_stdlib_module(module: str) -> bool:
base_module = module.partition(".")[0]
return base_module in _get_stdlib_modules()
def _get_stdlib_modules():
if sys.version_info.major == 3:
if sys.version_info.minor == 9:
return stdlib3_9
if sys.version_info.minor >= 10: # noqa: YTT204
return sys.stdlib_module_names # type: ignore[attr-defined]
elif sys.version_info.major > 3:
return sys.stdlib_module_names # type: ignore[attr-defined]
raise RuntimeError(f"Unsupported Python version: {sys.version_info}")
stdlib3_9 = {
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"graphlib",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
"zoneinfo",
}
|
from typing import Iterator, MutableSequence, TypeVar
from docarray.array.doc_list.sequence_indexing_mixin import IndexingSequenceMixin
T_item = TypeVar('T_item')
class ListAdvancedIndexing(IndexingSequenceMixin[T_item]):
"""
A list wrapper that implements custom indexing.
You can index into a ListAdvancedIndexing like a numpy array or torch tensor:
---
```python
docs[0] # index by position
docs[0:5:2] # index by slice
docs[[0, 2, 3]] # index by list of indices
docs[True, False, True, True, ...] # index by boolean mask
```
---
"""
_data: MutableSequence[T_item]
def __init__(self, data: MutableSequence[T_item]):
self._data = data
@property
def data(self) -> MutableSequence[T_item]:
return self._data
def __len__(self) -> int:
return len(self._data)
def __iter__(self) -> Iterator[T_item]:
for item in self._data:
yield item
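# Minimal usage sketch (illustrative, not part of the original file); the fancy
# indexing shown in the class docstring comes from IndexingSequenceMixin, while the
# wrapper itself only stores the sequence and forwards len() and iteration:
#   docs = ListAdvancedIndexing([doc_a, doc_b, doc_c])
#   len(docs) == 3 and list(docs) == [doc_a, doc_b, doc_c]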
|
from typing import Iterator, MutableSequence, TypeVar
from docarray.array.doc_list.sequence_indexing_mixin import IndexingSequenceMixin
T_item = TypeVar('T_item')
class ListAdvancedIndexing(IndexingSequenceMixin[T_item]):
"""
A list wrapper that implements custom indexing.
You can index into a ListAdvancedIndexing like a numpy array or torch tensor:
.. code-block:: python
docs[0] # index by position
docs[0:5:2] # index by slice
docs[[0, 2, 3]] # index by list of indices
docs[True, False, True, True, ...] # index by boolean mask
"""
_data: MutableSequence[T_item]
def __init__(self, data: MutableSequence[T_item]):
self._data = data
@property
def data(self) -> MutableSequence[T_item]:
return self._data
def __len__(self) -> int:
return len(self._data)
def __iter__(self) -> Iterator[T_item]:
for item in self._data:
yield item
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Type, Union
from docarray.document import BaseDocument
if TYPE_CHECKING:
from docarray.typing import NdArray, TorchTensor
class AbstractDocumentArray(Sequence):
document_type: Type[BaseDocument]
_columns: Optional[
Dict[str, Union['TorchTensor', 'AbstractDocumentArray', 'NdArray', None]]
] # here columns are the holder of the data in tensor modes
@abstractmethod
def __init__(self, docs: Iterable[BaseDocument]):
...
@abstractmethod
def __class_getitem__(
cls, item: Type[BaseDocument]
) -> Type['AbstractDocumentArray']:
...
@abstractmethod
def is_stacked(self) -> bool:
...
@abstractmethod
def _column_fields(self) -> List[str]:
...
@abstractmethod
def __iter_over_stacked_documents__(self) -> Iterable[BaseDocument]:
...
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Type, Union
from docarray.document import BaseDocument
if TYPE_CHECKING:
from docarray.typing import NdArray, TorchTensor
class AbstractDocumentArray(Sequence):
document_type: Type[BaseDocument]
_columns: Optional[
Dict[str, Union['TorchTensor', 'AbstractDocumentArray', 'NdArray', None]]
] # here columns are the holder of the data in tensor modes
@abstractmethod
def __init__(self, docs: Iterable[BaseDocument]):
...
@abstractmethod
def __class_getitem__(
cls, item: Type[BaseDocument]
) -> Type['AbstractDocumentArray']:
...
@abstractmethod
def is_stacked(self) -> bool:
...
@abstractmethod
def _column_fields(self) -> List[str]:
...
|
# ruff: noqa: E501
"""Test LLMCheckerChain functionality."""
import pytest
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_checker.prompt import (
_CHECK_ASSERTIONS_TEMPLATE,
_CREATE_DRAFT_ANSWER_TEMPLATE,
_LIST_ASSERTIONS_TEMPLATE,
_REVISED_ANSWER_TEMPLATE,
)
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.fixture
def fake_llm_checker_chain() -> LLMCheckerChain:
"""Fake LLMCheckerChain for testing."""
queries = {
_CREATE_DRAFT_ANSWER_TEMPLATE.format(
question="Which mammal lays the biggest eggs?",
): "I don't know which mammal layers the biggest eggs.",
_LIST_ASSERTIONS_TEMPLATE.format(
statement="I don't know which mammal layers the biggest eggs.",
): "1) I know that mammals lay eggs.\n2) I know that birds lay eggs.\n3) I know that birds are mammals.",
_CHECK_ASSERTIONS_TEMPLATE.format(
assertions="1) I know that mammals lay eggs.\n2) I know that birds lay eggs.\n3) I know that birds are mammals.",
): "1) I know that mammals lay eggs. TRUE\n2) I know that birds lay eggs. TRUE\n3) I know that birds are mammals. TRUE",
_REVISED_ANSWER_TEMPLATE.format(
checked_assertions="1) I know that mammals lay eggs. TRUE\n2) I know that birds lay eggs. TRUE\n3) I know that birds are mammals. TRUE",
question="Which mammal lays the biggest eggs?",
): "I still don't know.",
}
fake_llm = FakeLLM(queries=queries)
return LLMCheckerChain.from_llm(fake_llm, input_key="q", output_key="a")
def test_simple_question(fake_llm_checker_chain: LLMCheckerChain) -> None:
"""Test simple question that should not need python."""
question = "Which mammal lays the biggest eggs?"
output = fake_llm_checker_chain.run(question)
assert output == "I still don't know."
|
# ruff: noqa: E501
"""Test LLMCheckerChain functionality."""
import pytest
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_checker.prompt import (
_CHECK_ASSERTIONS_TEMPLATE,
_CREATE_DRAFT_ANSWER_TEMPLATE,
_LIST_ASSERTIONS_TEMPLATE,
_REVISED_ANSWER_TEMPLATE,
)
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.fixture
def fake_llm_checker_chain() -> LLMCheckerChain:
"""Fake LLMCheckerChain for testing."""
queries = {
_CREATE_DRAFT_ANSWER_TEMPLATE.format(
question="Which mammal lays the biggest eggs?"
): "I don't know which mammal layers the biggest eggs.",
_LIST_ASSERTIONS_TEMPLATE.format(
statement="I don't know which mammal layers the biggest eggs.",
): "1) I know that mammals lay eggs.\n2) I know that birds lay eggs.\n3) I know that birds are mammals.",
_CHECK_ASSERTIONS_TEMPLATE.format(
assertions="1) I know that mammals lay eggs.\n2) I know that birds lay eggs.\n3) I know that birds are mammals.",
): "1) I know that mammals lay eggs. TRUE\n2) I know that birds lay eggs. TRUE\n3) I know that birds are mammals. TRUE",
_REVISED_ANSWER_TEMPLATE.format(
checked_assertions="1) I know that mammals lay eggs. TRUE\n2) I know that birds lay eggs. TRUE\n3) I know that birds are mammals. TRUE",
question="Which mammal lays the biggest eggs?",
): "I still don't know.",
}
fake_llm = FakeLLM(queries=queries)
return LLMCheckerChain.from_llm(fake_llm, input_key="q", output_key="a")
def test_simple_question(fake_llm_checker_chain: LLMCheckerChain) -> None:
"""Test simple question that should not need python."""
question = "Which mammal lays the biggest eggs?"
output = fake_llm_checker_chain.run(question)
assert output == "I still don't know."
|
import multiprocessing
import time
import pytest
from docarray import DocumentArray, Document
from docarray.helper import random_port
@pytest.mark.parametrize(
'conn_config',
[
(dict(protocol='grpc'), 'grpc://127.0.0.1:$port/'),
(dict(protocol='grpc'), 'grpc://127.0.0.1:$port'),
(dict(protocol='websocket'), 'ws://127.0.0.1:$port'),
# (dict(protocol='http'), 'http://127.0.0.1:$port'), this somehow does not work on GH workflow
],
)
@pytest.mark.parametrize('show_pbar', [True, False])
@pytest.mark.parametrize('batch_size', [None, 1, 10])
def test_post_to_a_flow(show_pbar, conn_config, batch_size):
from jina import Flow
p = random_port()
da = DocumentArray.empty(100)
with Flow(**{**conn_config[0], 'port': p}):
da.post(conn_config[1].replace('$port', str(p)), batch_size=batch_size)
@pytest.mark.parametrize(
'hub_uri',
[
'jinahub://Hello',
'jinahub+sandbox://Hello',
# 'jinahub+docker://Hello', this somehow does not work on GH workflow
],
)
def test_post_with_jinahub(hub_uri):
da = DocumentArray.empty(100)
da.post(hub_uri)
assert isinstance(Document().post(hub_uri), Document)
def test_post_bad_scheme():
da = DocumentArray.empty(100)
with pytest.raises(ValueError):
da.post('haha')
def test_endpoint():
from jina import Executor, requests, Flow
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
docs.texts = ['foo'] * len(docs)
@requests(on='/bar')
def bar(self, docs: DocumentArray, **kwargs):
docs.texts = ['bar'] * len(docs)
def start_flow(stop_event, **kwargs):
"""start a blocking Flow."""
with Flow(**kwargs).add(uses=MyExec) as f:
f.block(stop_event=stop_event)
e = multiprocessing.Event() # create new Event
p = random_port()
t = multiprocessing.Process(
name='Blocked-Flow', target=start_flow, args=(e,), kwargs={'port': p}
)
t.start()
time.sleep(5)
N = 100
da = DocumentArray.empty(N)
try:
assert da.post(f'grpc://127.0.0.1:{p}/')[:, 'text'] == [''] * N
assert da.post(f'grpc://127.0.0.1:{p}/foo').texts == ['foo'] * N
assert da.post(f'grpc://127.0.0.1:{p}/bar').texts == ['bar'] * N
except:
raise
finally:
e.set()
t.join()
|
import multiprocessing
import time
import pytest
from docarray import DocumentArray
from docarray.helper import random_port
@pytest.mark.parametrize(
'conn_config',
[
(dict(protocol='grpc'), 'grpc://127.0.0.1:$port/'),
(dict(protocol='grpc'), 'grpc://127.0.0.1:$port'),
(dict(protocol='websocket'), 'ws://127.0.0.1:$port'),
# (dict(protocol='http'), 'http://127.0.0.1:$port'), this somehow does not work on GH workflow
],
)
@pytest.mark.parametrize('show_pbar', [True, False])
@pytest.mark.parametrize('batch_size', [None, 1, 10])
def test_post_to_a_flow(show_pbar, conn_config, batch_size):
from jina import Flow
p = random_port()
da = DocumentArray.empty(100)
with Flow(**{**conn_config[0], 'port': p}):
da.post(conn_config[1].replace('$port', str(p)), batch_size=batch_size)
@pytest.mark.parametrize(
'hub_uri',
[
'jinahub://Hello',
'jinahub+sandbox://Hello',
# 'jinahub+docker://Hello', this somehow does not work on GH workflow
],
)
def test_post_with_jinahub(hub_uri):
da = DocumentArray.empty(100)
da.post(hub_uri)
def test_post_bad_scheme():
da = DocumentArray.empty(100)
with pytest.raises(ValueError):
da.post('haha')
def test_endpoint():
from jina import Executor, requests, Flow
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
docs.texts = ['foo'] * len(docs)
@requests(on='/bar')
def bar(self, docs: DocumentArray, **kwargs):
docs.texts = ['bar'] * len(docs)
def start_flow(stop_event, **kwargs):
"""start a blocking Flow."""
with Flow(**kwargs).add(uses=MyExec) as f:
f.block(stop_event=stop_event)
e = multiprocessing.Event() # create new Event
p = random_port()
t = multiprocessing.Process(
name='Blocked-Flow', target=start_flow, args=(e,), kwargs={'port': p}
)
t.start()
time.sleep(5)
N = 100
da = DocumentArray.empty(N)
try:
assert da.post(f'grpc://127.0.0.1:{p}/')[:, 'text'] == [''] * N
assert da.post(f'grpc://127.0.0.1:{p}/foo').texts == ['foo'] * N
assert da.post(f'grpc://127.0.0.1:{p}/bar').texts == ['bar'] * N
except:
raise
finally:
e.set()
t.join()
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import PackDetInputs
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.core import DetDataSample
from mmdet.core.mask import BitmapMasks
from mmdet.datasets.pipelines import PackDetInputs
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'gt_ignore_flags': np.array([0, 0, 1], dtype=bool)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_height': 300,
'ori_width': 400,
'height': 600,
'width': 800,
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, )
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestRPN(TestCase):
@parameterized.expand(['rpn/rpn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.device.type == 'cpu'
# if rpn.num_classes > 1, force set rpn.num_classes = 1
model.rpn_head.num_classes = 2
detector = build_detector(model)
assert detector.rpn_head.num_classes == 1
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_rpn_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
assert len(out) == 2
@parameterized.expand([('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
# backbone convert to ResNet18
model.backbone.depth = 18
model.neck.in_channels = [64, 128, 256, 512]
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from tests.test_models.test_detectors.test_single_stage import (
_demo_mm_inputs, _get_detector_cfg)
class TestRPN(TestCase):
@parameterized.expand([
'rpn/rpn_r50_fpn_1x_coco.py',
])
def test_init(self, cfg_file):
model = _get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.rpn_head
assert detector.device.type == 'cpu'
@parameterized.expand([
('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
])
def test_rpn_forward_train(self, cfg_file, devices):
model = _get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = _demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
@parameterized.expand([
('rpn/rpn_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_test(self, cfg_file, devices):
model = _get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = _demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Sparsity Stats Query : Row Non-Zero Mean: 43.08049392700195, Row Sparsity Mean: 0.9985886216163635
Model Sparsity Stats Corpus : Row Non-Zero Mean: 206.8623504638672, Row Sparsity Mean: 0.9932224750518799
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
import logging
import random
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseInformationRetrievalEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the Touche-2020 IR dataset (https://huggingface.co/datasets/BeIR/webis-touche2020, https://huggingface.co/datasets/BeIR/webis-touche2020-qrels)
corpus = load_dataset("BeIR/webis-touche2020", "corpus", split="corpus")
queries = load_dataset("BeIR/webis-touche2020", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/webis-touche2020-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 30,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=30_000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-touche2020-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Version utils."""
import dataclasses
import re
from dataclasses import dataclass
from typing import Optional, Union
_VERSION_TMPL = r"^(?P<major>{v})" r"\.(?P<minor>{v})" r"\.(?P<patch>{v})$"
_VERSION_WILDCARD_REG = re.compile(_VERSION_TMPL.format(v=r"\d+|\*"))
_VERSION_RESOLVED_REG = re.compile(_VERSION_TMPL.format(v=r"\d+"))
@dataclass
class Version:
"""Dataset version MAJOR.MINOR.PATCH.
Args:
version_str (:obj:`str`): Eg: "1.2.3".
description (:obj:`str`): A description of what is new in this version.
Attributes:
version_str (:obj:`str`): Eg: "1.2.3".
description (:obj:`str`): A description of what is new in this version.
major (:obj:`str`):
minor (:obj:`str`):
patch (:obj:`str`):
Example:
```py
>>> VERSION = datasets.Version("1.0.0")
```
"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version(self.version_str)
def __repr__(self):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
def __eq__(self, other):
try:
other = self._validate_operand(other)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __le__(self, other):
other = self._validate_operand(other)
return self.tuple <= other.tuple
def __gt__(self, other):
other = self._validate_operand(other)
return self.tuple > other.tuple
def __ge__(self, other):
other = self._validate_operand(other)
return self.tuple >= other.tuple
def match(self, other_version):
"""Returns True if other_version matches.
Args:
            other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return major in [self.major, "*"] and minor in [self.minor, "*"] and patch in [self.patch, "*"]
@classmethod
def from_dict(cls, dic):
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _to_yaml_string(self) -> str:
return self.version_str
def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = f"Invalid version '{version_str}'. Format should be x.y.z"
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(v if v == "*" else int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Version utils."""
import dataclasses
import re
from dataclasses import dataclass
from typing import Optional, Union
_VERSION_TMPL = r"^(?P<major>{v})" r"\.(?P<minor>{v})" r"\.(?P<patch>{v})$"
_VERSION_WILDCARD_REG = re.compile(_VERSION_TMPL.format(v=r"\d+|\*"))
_VERSION_RESOLVED_REG = re.compile(_VERSION_TMPL.format(v=r"\d+"))
@dataclass
class Version:
"""Dataset version MAJOR.MINOR.PATCH.
Args:
version_str (:obj:`str`): Eg: "1.2.3".
description (:obj:`str`): A description of what is new in this version.
Attributes:
version_str (:obj:`str`): Eg: "1.2.3".
description (:obj:`str`): A description of what is new in this version.
major (:obj:`str`):
minor (:obj:`str`):
patch (:obj:`str`):
Example:
```py
>>> VERSION = datasets.Version("1.0.0")
```
"""
version_str: str
description: Optional[str] = None
major: Optional[Union[str, int]] = None
minor: Optional[Union[str, int]] = None
patch: Optional[Union[str, int]] = None
def __post_init__(self):
self.major, self.minor, self.patch = _str_to_version(self.version_str)
def __repr__(self):
return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def tuple(self):
return self.major, self.minor, self.patch
def _validate_operand(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise AssertionError(f"{other} (type {type(other)}) cannot be compared to version.")
def __eq__(self, other):
try:
other = self._validate_operand(other)
except (AssertionError, ValueError):
return False
else:
return self.tuple == other.tuple
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
other = self._validate_operand(other)
return self.tuple < other.tuple
def __le__(self, other):
other = self._validate_operand(other)
return self.tuple <= other.tuple
def __gt__(self, other):
other = self._validate_operand(other)
return self.tuple > other.tuple
def __ge__(self, other):
other = self._validate_operand(other)
return self.tuple >= other.tuple
def match(self, other_version):
"""Returns True if other_version matches.
Args:
            other_version: string, of the form "x[.y[.z]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return major in [self.major, "*"] and minor in [self.minor, "*"] and patch in [self.patch, "*"]
@classmethod
def from_dict(cls, dic):
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _to_yaml_string(self) -> str:
return self.version_str
def _str_to_version(version_str, allow_wildcard=False):
"""Return the tuple (major, minor, patch) version extracted from the str."""
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = f"Invalid version '{version_str}'. Format should be x.y.z"
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(v if v == "*" else int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
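# A minimal usage sketch of the Version helper above: comparisons are numeric
# on the (major, minor, patch) tuple, and `match` accepts "*" wildcards.
if __name__ == "__main__":
    v = Version("1.2.3")
    assert v == "1.2.3"
    assert Version("1.10.0") > Version("1.9.0")  # numeric, not lexicographic
    assert v.match("1.*.*") and not v.match("2.*.*")
    print(repr(v))  # -> 1.2.3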
|
from collections.abc import AsyncIterator, Iterator, Sequence
from typing import (
Any,
Callable,
Optional,
TypeVar,
Union,
)
from langchain_core.stores import BaseStore
K = TypeVar("K")
V = TypeVar("V")
class EncoderBackedStore(BaseStore[K, V]):
"""Wraps a store with key and value encoders/decoders.
    Examples that use JSON for encoding/decoding:
.. code-block:: python
import json
def key_encoder(key: int) -> str:
return json.dumps(key)
def value_serializer(value: float) -> str:
return json.dumps(value)
def value_deserializer(serialized_value: str) -> float:
return json.loads(serialized_value)
# Create an instance of the abstract store
abstract_store = MyCustomStore()
# Create an instance of the encoder-backed store
store = EncoderBackedStore(
store=abstract_store,
key_encoder=key_encoder,
value_serializer=value_serializer,
value_deserializer=value_deserializer
)
# Use the encoder-backed store methods
store.mset([(1, 3.14), (2, 2.718)])
values = store.mget([1, 2]) # Retrieves [3.14, 2.718]
store.mdelete([1, 2]) # Deletes the keys 1 and 2
"""
def __init__(
self,
store: BaseStore[str, Any],
key_encoder: Callable[[K], str],
value_serializer: Callable[[V], bytes],
value_deserializer: Callable[[Any], V],
) -> None:
"""Initialize an EncodedStore."""
self.store = store
self.key_encoder = key_encoder
self.value_serializer = value_serializer
self.value_deserializer = value_deserializer
def mget(self, keys: Sequence[K]) -> list[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
values = self.store.mget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
async def amget(self, keys: Sequence[K]) -> list[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
values = await self.store.amget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
self.store.mset(encoded_pairs)
async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
await self.store.amset(encoded_pairs)
def mdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
self.store.mdelete(encoded_keys)
async def amdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
await self.store.amdelete(encoded_keys)
def yield_keys(
self,
*,
prefix: Optional[str] = None,
) -> Union[Iterator[K], Iterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
yield from self.store.yield_keys(prefix=prefix)
async def ayield_keys(
self,
*,
prefix: Optional[str] = None,
) -> Union[AsyncIterator[K], AsyncIterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
async for key in self.store.ayield_keys(prefix=prefix):
yield key
|
from collections.abc import AsyncIterator, Iterator, Sequence
from typing import (
Any,
Callable,
Optional,
TypeVar,
Union,
)
from langchain_core.stores import BaseStore
K = TypeVar("K")
V = TypeVar("V")
class EncoderBackedStore(BaseStore[K, V]):
"""Wraps a store with key and value encoders/decoders.
    Examples that use JSON for encoding/decoding:
.. code-block:: python
import json
def key_encoder(key: int) -> str:
return json.dumps(key)
def value_serializer(value: float) -> str:
return json.dumps(value)
def value_deserializer(serialized_value: str) -> float:
return json.loads(serialized_value)
# Create an instance of the abstract store
abstract_store = MyCustomStore()
# Create an instance of the encoder-backed store
store = EncoderBackedStore(
store=abstract_store,
key_encoder=key_encoder,
value_serializer=value_serializer,
value_deserializer=value_deserializer
)
# Use the encoder-backed store methods
store.mset([(1, 3.14), (2, 2.718)])
values = store.mget([1, 2]) # Retrieves [3.14, 2.718]
store.mdelete([1, 2]) # Deletes the keys 1 and 2
"""
def __init__(
self,
store: BaseStore[str, Any],
key_encoder: Callable[[K], str],
value_serializer: Callable[[V], bytes],
value_deserializer: Callable[[Any], V],
) -> None:
"""Initialize an EncodedStore."""
self.store = store
self.key_encoder = key_encoder
self.value_serializer = value_serializer
self.value_deserializer = value_deserializer
def mget(self, keys: Sequence[K]) -> list[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
values = self.store.mget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
async def amget(self, keys: Sequence[K]) -> list[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
values = await self.store.amget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
self.store.mset(encoded_pairs)
async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
await self.store.amset(encoded_pairs)
def mdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
self.store.mdelete(encoded_keys)
async def amdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
await self.store.amdelete(encoded_keys)
def yield_keys(
self, *, prefix: Optional[str] = None
) -> Union[Iterator[K], Iterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
yield from self.store.yield_keys(prefix=prefix)
async def ayield_keys(
self, *, prefix: Optional[str] = None
) -> Union[AsyncIterator[K], AsyncIterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
async for key in self.store.ayield_keys(prefix=prefix):
yield key
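# A minimal usage sketch, assuming langchain_core's InMemoryStore as the backing
# store and JSON for both the key encoding and the value (de)serialization.
if __name__ == "__main__":
    import json

    from langchain_core.stores import InMemoryStore

    store = EncoderBackedStore(
        store=InMemoryStore(),
        key_encoder=lambda key: json.dumps(key),
        value_serializer=lambda value: json.dumps(value),
        value_deserializer=lambda blob: json.loads(blob),
    )
    store.mset([(1, 3.14), (2, 2.718)])
    print(store.mget([1, 2]))  # -> [3.14, 2.718]
    store.mdelete([1])
    print(store.mget([1, 2]))  # -> [None, 2.718]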
|
# Copyright 2025 Open AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
"""
Implements a batch, differentiable, standard pinhole camera
"""
origin: torch.Tensor # [batch_size x 3]
x: torch.Tensor # [batch_size x 3]
y: torch.Tensor # [batch_size x 3]
z: torch.Tensor # [batch_size x 3]
width: int
height: int
x_fov: float
y_fov: float
shape: Tuple[int]
def __post_init__(self):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
def resolution(self):
return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
def fov(self):
return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
def get_image_coords(self) -> torch.Tensor:
"""
:return: coords of shape (width * height, 2)
"""
pixel_indices = torch.arange(self.height * self.width)
coords = torch.stack(
[
pixel_indices % self.width,
torch.div(pixel_indices, self.width, rounding_mode="trunc"),
],
axis=1,
)
return coords
@property
def camera_rays(self):
batch_size, *inner_shape = self.shape
inner_batch_size = int(np.prod(inner_shape))
coords = self.get_image_coords()
coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
rays = self.get_camera_rays(coords)
rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
return rays
def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
batch_size, *shape, n_coords = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
flat = coords.view(batch_size, -1, 2)
res = self.resolution()
fov = self.fov()
fracs = (flat.float() / (res - 1)) * 2 - 1
fracs = fracs * torch.tan(fov / 2)
fracs = fracs.view(batch_size, -1, 2)
directions = (
self.z.view(batch_size, 1, 3)
+ self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
+ self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
)
directions = directions / directions.norm(dim=-1, keepdim=True)
rays = torch.stack(
[
torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
directions,
],
dim=2,
)
return rays.view(batch_size, *shape, 2, 3)
def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
"""
Creates a new camera for the resized view assuming the aspect ratio does not change.
"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin,
x=self.x,
y=self.y,
z=self.z,
width=width,
height=height,
x_fov=self.x_fov,
y_fov=self.y_fov,
)
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
origins = []
xs = []
ys = []
zs = []
for theta in np.linspace(0, 2 * np.pi, num=20):
z = np.array([np.sin(theta), np.cos(theta), -0.5])
z /= np.sqrt(np.sum(z**2))
origin = -z * 4
x = np.array([np.cos(theta), -np.sin(theta), 0.0])
y = np.cross(z, x)
origins.append(origin)
xs.append(x)
ys.append(y)
zs.append(z)
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
x=torch.from_numpy(np.stack(xs, axis=0)).float(),
y=torch.from_numpy(np.stack(ys, axis=0)).float(),
z=torch.from_numpy(np.stack(zs, axis=0)).float(),
width=size,
height=size,
x_fov=0.7,
y_fov=0.7,
shape=(1, len(xs)),
)
|
# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
"""
Implements a batch, differentiable, standard pinhole camera
"""
origin: torch.Tensor # [batch_size x 3]
x: torch.Tensor # [batch_size x 3]
y: torch.Tensor # [batch_size x 3]
z: torch.Tensor # [batch_size x 3]
width: int
height: int
x_fov: float
y_fov: float
shape: Tuple[int]
def __post_init__(self):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
def resolution(self):
return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))
def fov(self):
return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))
def get_image_coords(self) -> torch.Tensor:
"""
:return: coords of shape (width * height, 2)
"""
pixel_indices = torch.arange(self.height * self.width)
coords = torch.stack(
[
pixel_indices % self.width,
torch.div(pixel_indices, self.width, rounding_mode="trunc"),
],
axis=1,
)
return coords
@property
def camera_rays(self):
batch_size, *inner_shape = self.shape
inner_batch_size = int(np.prod(inner_shape))
coords = self.get_image_coords()
coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
rays = self.get_camera_rays(coords)
rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
return rays
def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
batch_size, *shape, n_coords = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
flat = coords.view(batch_size, -1, 2)
res = self.resolution()
fov = self.fov()
fracs = (flat.float() / (res - 1)) * 2 - 1
fracs = fracs * torch.tan(fov / 2)
fracs = fracs.view(batch_size, -1, 2)
directions = (
self.z.view(batch_size, 1, 3)
+ self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
+ self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
)
directions = directions / directions.norm(dim=-1, keepdim=True)
rays = torch.stack(
[
torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
directions,
],
dim=2,
)
return rays.view(batch_size, *shape, 2, 3)
def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
"""
Creates a new camera for the resized view assuming the aspect ratio does not change.
"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin,
x=self.x,
y=self.y,
z=self.z,
width=width,
height=height,
x_fov=self.x_fov,
y_fov=self.y_fov,
)
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
origins = []
xs = []
ys = []
zs = []
for theta in np.linspace(0, 2 * np.pi, num=20):
z = np.array([np.sin(theta), np.cos(theta), -0.5])
z /= np.sqrt(np.sum(z**2))
origin = -z * 4
x = np.array([np.cos(theta), -np.sin(theta), 0.0])
y = np.cross(z, x)
origins.append(origin)
xs.append(x)
ys.append(y)
zs.append(z)
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
x=torch.from_numpy(np.stack(xs, axis=0)).float(),
y=torch.from_numpy(np.stack(ys, axis=0)).float(),
z=torch.from_numpy(np.stack(zs, axis=0)).float(),
width=size,
height=size,
x_fov=0.7,
y_fov=0.7,
shape=(1, len(xs)),
)
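# A minimal usage sketch: build the 20-view pan rig defined above and inspect the
# ray bundle, whose last two dims hold (origin, direction) per pixel.
if __name__ == "__main__":
    camera = create_pan_cameras(size=8)
    rays = camera.camera_rays
    print(rays.shape)  # -> torch.Size([1, 1280, 2, 3]), i.e. 20 views * 8 * 8 pixels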
|
import pytest
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_bytes(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_bytes(protocol=protocol, compress=compress)
d2 = MyDoc.from_bytes(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_base64(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_base64(protocol=protocol, compress=compress)
d2 = MyDoc.from_base64(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
|
import pytest
from docarray import BaseDocument
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_bytes(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_bytes(protocol=protocol, compress=compress)
d2 = MyDoc.from_bytes(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_base64(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_base64(protocol=protocol, compress=compress)
d2 = MyDoc.from_base64(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
|
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class LinearReader(BaseReader):
"""
Linear reader. Reads data from Linear issues for the passed query.
Args:
api_key (str): Personal API token.
"""
def __init__(self, api_key: str) -> None:
self.api_key = api_key
def load_data(self, query: str) -> List[Document]:
# Define the GraphQL query
graphql_endpoint = "https://api.linear.app/graphql"
headers = {
"Authorization": self.api_key,
"Content-Type": "application/json",
}
payload = {"query": query}
# Make the GraphQL request
response = requests.post(graphql_endpoint, json=payload, headers=headers)
data = response.json()
# Extract relevant information
issues = []
team_data = data.get("data", {}).get("team", {})
for issue in team_data.get("issues", {}).get("nodes", []):
assignee = issue.get("assignee", {}).get("name", "")
labels = [
label_node["name"]
for label_node in issue.get("labels", {}).get("nodes", [])
]
project = issue.get("project", {}).get("name", "")
state = issue.get("state", {}).get("name", "")
creator = issue.get("creator", {}).get("name", "")
issues.append(
Document(
text=f"{issue['title']} \n {issue['description']}",
extra_info={
"id": issue["id"],
"title": issue["title"],
"created_at": issue["createdAt"],
"archived_at": issue["archivedAt"],
"auto_archived_at": issue["autoArchivedAt"],
"auto_closed_at": issue["autoClosedAt"],
"branch_name": issue["branchName"],
"canceled_at": issue["canceledAt"],
"completed_at": issue["completedAt"],
"creator": creator,
"due_date": issue["dueDate"],
"estimate": issue["estimate"],
"labels": labels,
"project": project,
"state": state,
"updated_at": issue["updatedAt"],
"assignee": assignee,
},
)
)
return issues
|
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class LinearReader(BaseReader):
"""Linear reader. Reads data from Linear issues for the passed query.
Args:
api_key (str): Personal API token.
"""
def __init__(self, api_key: str) -> None:
self.api_key = api_key
def load_data(self, query: str) -> List[Document]:
# Define the GraphQL query
graphql_endpoint = "https://api.linear.app/graphql"
headers = {
"Authorization": self.api_key,
"Content-Type": "application/json",
}
payload = {"query": query}
# Make the GraphQL request
response = requests.post(graphql_endpoint, json=payload, headers=headers)
data = response.json()
# Extract relevant information
issues = []
team_data = data.get("data", {}).get("team", {})
for issue in team_data.get("issues", {}).get("nodes", []):
assignee = issue.get("assignee", {}).get("name", "")
labels = [
label_node["name"]
for label_node in issue.get("labels", {}).get("nodes", [])
]
project = issue.get("project", {}).get("name", "")
state = issue.get("state", {}).get("name", "")
creator = issue.get("creator", {}).get("name", "")
issues.append(
Document(
text=f"{issue['title']} \n {issue['description']}",
extra_info={
"id": issue["id"],
"title": issue["title"],
"created_at": issue["createdAt"],
"archived_at": issue["archivedAt"],
"auto_archived_at": issue["autoArchivedAt"],
"auto_closed_at": issue["autoClosedAt"],
"branch_name": issue["branchName"],
"canceled_at": issue["canceledAt"],
"completed_at": issue["completedAt"],
"creator": creator,
"due_date": issue["dueDate"],
"estimate": issue["estimate"],
"labels": labels,
"project": project,
"state": state,
"updated_at": issue["updatedAt"],
"assignee": assignee,
},
)
)
return issues
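# A minimal usage sketch (not executed here: it needs a real Linear API key and
# network access). The team id and token below are placeholders, and the query
# must return every field the reader indexes into (id, title, createdAt, ...).
if __name__ == "__main__":
    query = """
    query Team {
      team(id: "TEAM_ID") {
        issues {
          nodes {
            id title description createdAt archivedAt autoArchivedAt autoClosedAt
            branchName canceledAt completedAt dueDate estimate updatedAt
            creator { name } assignee { name } project { name } state { name }
            labels { nodes { name } }
          }
        }
      }
    }
    """
    reader = LinearReader(api_key="lin_api_...")  # placeholder token
    documents = reader.load_data(query=query)
    print(len(documents))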
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
num_stuff, img_metas):
"""Preprocess the ground truth for a image.
Args:
gt_labels (Tensor): Ground truth labels of each bbox,
with shape (num_gts, ).
        gt_masks (BitmapMasks): Ground truth masks of each instance
            of an image, shape (num_gts, h, w).
gt_semantic_seg (Tensor | None): Ground truth of semantic
segmentation with the shape (1, h, w).
[0, num_thing_class - 1] means things,
[num_thing_class, num_class-1] means stuff,
255 means VOID. It's None when training instance segmentation.
        img_metas (dict): Image meta information for one image.
Returns:
tuple: a tuple containing the following targets.
            - labels (Tensor): Ground truth class indices for an
                image, with shape (n, ), where n is the sum of the number
                of stuff types and the number of instances in an image.
            - masks (Tensor): Ground truth mask for an image, with
shape (n, h, w). Contains stuff and things when training
panoptic segmentation, and things only when training
instance segmentation.
"""
num_classes = num_things + num_stuff
things_masks = gt_masks.pad(img_metas['pad_shape'][:2], pad_val=0)\
.to_tensor(dtype=torch.bool, device=gt_labels.device)
if gt_semantic_seg is None:
masks = things_masks.long()
return gt_labels, masks
things_labels = gt_labels
gt_semantic_seg = gt_semantic_seg.squeeze(0)
semantic_labels = torch.unique(
gt_semantic_seg,
sorted=False,
return_inverse=False,
return_counts=False)
stuff_masks_list = []
stuff_labels_list = []
for label in semantic_labels:
if label < num_things or label >= num_classes:
continue
stuff_mask = gt_semantic_seg == label
stuff_masks_list.append(stuff_mask)
stuff_labels_list.append(label)
if len(stuff_masks_list) > 0:
stuff_masks = torch.stack(stuff_masks_list, dim=0)
stuff_labels = torch.stack(stuff_labels_list, dim=0)
labels = torch.cat([things_labels, stuff_labels], dim=0)
masks = torch.cat([things_masks, stuff_masks], dim=0)
else:
labels = things_labels
masks = things_masks
masks = masks.long()
return labels, masks
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
num_stuff):
"""Preprocess the ground truth for a image.
Args:
gt_labels (Tensor): Ground truth labels of each bbox,
with shape (num_gts, ).
        gt_masks (BitmapMasks): Ground truth masks of each instance
            of an image, shape (num_gts, h, w).
gt_semantic_seg (Tensor): Ground truth of semantic
segmentation with the shape (1, h, w).
[0, num_thing_class - 1] means things,
[num_thing_class, num_class-1] means stuff,
255 means VOID.
Returns:
tuple: a tuple containing the following targets.
            - labels (Tensor): Ground truth class indices for an
                image, with shape (n, ), where n is the sum of the number
                of stuff types and the number of instances in an image.
            - masks (Tensor): Ground truth mask for an image, with
shape (n, h, w).
"""
num_classes = num_things + num_stuff
things_labels = gt_labels
gt_semantic_seg = gt_semantic_seg.squeeze(0)
things_masks = gt_masks.pad(gt_semantic_seg.shape[-2:], pad_val=0)\
.to_tensor(dtype=torch.bool, device=gt_labels.device)
semantic_labels = torch.unique(
gt_semantic_seg,
sorted=False,
return_inverse=False,
return_counts=False)
stuff_masks_list = []
stuff_labels_list = []
for label in semantic_labels:
if label < num_things or label >= num_classes:
continue
stuff_mask = gt_semantic_seg == label
stuff_masks_list.append(stuff_mask)
stuff_labels_list.append(label)
if len(stuff_masks_list) > 0:
stuff_masks = torch.stack(stuff_masks_list, dim=0)
stuff_labels = torch.stack(stuff_labels_list, dim=0)
labels = torch.cat([things_labels, stuff_labels], dim=0)
masks = torch.cat([things_masks, stuff_masks], dim=0)
else:
labels = things_labels
masks = things_masks
masks = masks.long()
return labels, masks
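# A minimal sketch of calling the helper above on toy data, assuming mmdet's
# BitmapMasks is importable from mmdet.core; two "thing" instances plus one
# "stuff" region (class id 2) yield three label/mask pairs.
if __name__ == '__main__':
    import numpy as np
    from mmdet.core import BitmapMasks

    h, w = 4, 4
    gt_labels = torch.tensor([0, 1])  # two thing instances
    gt_masks = BitmapMasks(np.random.randint(0, 2, (2, h, w)), h, w)
    gt_semantic_seg = torch.full((1, h, w), 255, dtype=torch.long)
    gt_semantic_seg[0, :2] = 2  # one stuff class (id 2), the rest is VOID (255)
    labels, masks = preprocess_panoptic_gt(
        gt_labels, gt_masks, gt_semantic_seg, num_things=2, num_stuff=1)
    print(labels.shape, masks.shape)  # -> torch.Size([3]) torch.Size([3, 4, 4])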
|
"""Bing Search API toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import BingSearchResults, BingSearchRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BingSearchRun": "langchain_community.tools",
"BingSearchResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BingSearchResults",
"BingSearchRun",
]
|
"""Bing Search API toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import BingSearchResults, BingSearchRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BingSearchRun": "langchain_community.tools",
"BingSearchResults": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BingSearchRun",
"BingSearchResults",
]
|
"""Module for argparse for Client"""
def mixin_client_protocol_parser(parser):
"""Add the arguments for the protocol to the client parser
:param parser: the parser configure
"""
from jina.enums import ProtocolType
parser.add_argument(
'--protocol',
type=ProtocolType.from_string,
choices=list(ProtocolType),
default=ProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
:param parser: the parser configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
parser.add_argument(
'--tracing',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--traces-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--traces-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--metrics',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--metrics-exporter-host',
type=str,
default=None,
        help='If metrics is enabled, this hostname will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--metrics-exporter-port',
type=int,
default=None,
        help='If metrics is enabled, this port will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--log-config',
type=str,
default='default',
help='The config name or the absolute path to the YAML config file of the logger used in this object.',
)
|
"""Module for argparse for Client"""
def mixin_client_protocol_parser(parser):
"""Add the arguments for the protocol to the client parser
:param parser: the parser configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolType.from_string,
choices=list(GatewayProtocolType),
default=GatewayProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
:param parser: the parser configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
parser.add_argument(
'--tracing',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--traces-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--traces-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--metrics',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--metrics-exporter-host',
type=str,
default=None,
        help='If metrics is enabled, this hostname will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--metrics-exporter-port',
type=int,
default=None,
        help='If metrics is enabled, this port will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--log-config',
type=str,
default='default',
help='The config name or the absolute path to the YAML config file of the logger used in this object.',
)
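# A minimal usage sketch: apply both mixins to a plain argparse parser and parse
# a couple of the flags defined above (assumes jina is installed, since the
# protocol mixin imports jina.enums lazily).
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Client arguments')
    mixin_client_protocol_parser(parser)
    mixin_client_features_parser(parser)
    args = parser.parse_args(['--protocol', 'http', '--asyncio'])
    print(args.protocol, args.asyncio)  # protocol is parsed into a GatewayProtocolType member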
|
try:
from docarray import BaseDoc as Document
from docarray import DocArray as DocumentArray
docarray_v2 = True
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
|
try:
from docarray import BaseDocument as Document
from docarray import DocumentArray
docarray_v2 = True
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
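# A minimal sketch of how downstream code can branch on the flag resolved above:
# the aliased `Document` name works for both docarray major versions, while
# v2-specific schema definitions are guarded by `docarray_v2`.
if __name__ == '__main__':
    if docarray_v2:
        class _TextDoc(Document):  # v2: Document aliases BaseDocument, a schema base class
            text: str = ''

        print(_TextDoc(text='hello').text)
    else:
        print(Document(text='hello').text)  # v1: Document is a plain, schema-less container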
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .utils import download_manager as _deprecated_download_manager
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
__all__ = ['ClassAwareSampler', 'AspectRatioBatchSampler']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .class_aware_sampler import ClassAwareSampler
from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler
__all__ = [
'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler',
'InfiniteGroupBatchSampler', 'InfiniteBatchSampler', 'ClassAwareSampler'
]
|
import json
import pathlib
from typing import Any, Callable, List, Optional, Tuple
from urllib.parse import urlparse
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class CLEVRClassification(VisionDataset):
"""`CLEVR <https://cs.stanford.edu/people/jcjohns/clevr/>`_ classification dataset.
The number of objects in a scene are used as label.
Args:
root (string): Root directory of dataset where directory ``root/clevr`` exists or will be saved to if download is
set to True.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory. If
dataset is already downloaded, it is not downloaded again.
"""
_URL = "https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip"
_MD5 = "b11922020e72d0cd9154779b2d3d07d2"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / "clevr"
self._data_folder = self._base_folder / pathlib.Path(urlparse(self._URL).path).stem
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self._image_files = sorted(self._data_folder.joinpath("images", self._split).glob("*"))
self._labels: List[Optional[int]]
if self._split != "test":
with open(self._data_folder / "scenes" / f"CLEVR_{self._split}_scenes.json") as file:
content = json.load(file)
num_objects = {scene["image_filename"]: len(scene["objects"]) for scene in content["scenes"]}
self._labels = [num_objects[image_file.name] for image_file in self._image_files]
else:
self._labels = [None] * len(self._image_files)
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file = self._image_files[idx]
label = self._labels[idx]
image = Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_folder.exists() and self._data_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, str(self._base_folder), md5=self._MD5)
def extra_repr(self) -> str:
return f"split={self._split}"
|
import json
import pathlib
from typing import Any, Callable, List, Optional, Tuple
from urllib.parse import urlparse
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class CLEVRClassification(VisionDataset):
"""`CLEVR <https://cs.stanford.edu/people/jcjohns/clevr/>`_ classification dataset.
The number of objects in a scene are used as label.
Args:
root (string): Root directory of dataset where directory ``root/clevr`` exists or will be saved to if download is
set to True.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory. If
dataset is already downloaded, it is not downloaded again.
"""
_URL = "https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip"
_MD5 = "b11922020e72d0cd9154779b2d3d07d2"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / "clevr"
self._data_folder = self._base_folder / pathlib.Path(urlparse(self._URL).path).stem
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self._image_files = sorted(self._data_folder.joinpath("images", self._split).glob("*"))
self._labels: List[Optional[int]]
if self._split != "test":
with open(self._data_folder / "scenes" / f"CLEVR_{self._split}_scenes.json") as file:
content = json.load(file)
num_objects = {scene["image_filename"]: len(scene["objects"]) for scene in content["scenes"]}
self._labels = [num_objects[image_file.name] for image_file in self._image_files]
else:
self._labels = [None] * len(self._image_files)
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file = self._image_files[idx]
label = self._labels[idx]
image = Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_folder.exists() and self._data_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, str(self._base_folder), md5=self._MD5)
def extra_repr(self) -> str:
return f"split={self._split}"
|
"""Database Tool."""
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.utilities.sql_wrapper import SQLDatabase
from sqlalchemy import MetaData, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.schema import CreateTable
class DatabaseToolSpec(BaseToolSpec, BaseReader):
"""
Simple Database tool.
    Concatenates each row into a Document used by LlamaIndex.
Args:
sql_database (Optional[SQLDatabase]): SQL database to use,
including table names to specify.
See :ref:`Ref-Struct-Store` for more details.
OR
engine (Optional[Engine]): SQLAlchemy Engine object of the database connection.
OR
uri (Optional[str]): uri of the database connection.
OR
scheme (Optional[str]): scheme of the database connection.
host (Optional[str]): host of the database connection.
port (Optional[int]): port of the database connection.
user (Optional[str]): user of the database connection.
password (Optional[str]): password of the database connection.
dbname (Optional[str]): dbname of the database connection.
"""
spec_functions = ["load_data", "describe_tables", "list_tables"]
def __init__(
self,
sql_database: Optional[SQLDatabase] = None,
engine: Optional[Engine] = None,
uri: Optional[str] = None,
scheme: Optional[str] = None,
host: Optional[str] = None,
port: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
dbname: Optional[str] = None,
*args: Optional[Any],
**kwargs: Optional[Any],
) -> None:
"""Initialize with parameters."""
if sql_database:
self.sql_database = sql_database
elif engine:
self.sql_database = SQLDatabase(engine, *args, **kwargs)
elif uri:
self.uri = uri
self.sql_database = SQLDatabase.from_uri(uri, *args, **kwargs)
elif scheme and host and port and user and password and dbname:
uri = f"{scheme}://{user}:{password}@{host}:{port}/{dbname}"
self.uri = uri
self.sql_database = SQLDatabase.from_uri(uri, *args, **kwargs)
else:
raise ValueError(
"You must provide either a SQLDatabase, "
"a SQL Alchemy Engine, a valid connection URI, or a valid "
"set of credentials."
)
self._metadata = MetaData()
self._metadata.reflect(bind=self.sql_database.engine)
def load_data(self, query: str) -> List[Document]:
"""
Query and load data from the Database, returning a list of Documents.
Args:
query (str): an SQL query to filter tables and rows.
Returns:
List[Document]: A list of Document objects.
"""
documents = []
with self.sql_database.engine.connect() as connection:
if query is None:
raise ValueError("A query parameter is necessary to filter the data")
else:
result = connection.execute(text(query))
for item in result.fetchall():
# fetch each item
doc_str = ", ".join([str(entry) for entry in item])
documents.append(Document(text=doc_str))
return documents
def list_tables(self) -> List[str]:
"""
Returns a list of available tables in the database.
To retrieve details about the columns of specific tables, use
the describe_tables endpoint.
"""
return [x.name for x in self._metadata.sorted_tables]
def describe_tables(self, tables: Optional[List[str]] = None) -> str:
"""
Describes the specified tables in the database.
Args:
tables (List[str]): A list of table names to retrieve details about
"""
table_names = tables or [table.name for table in self._metadata.sorted_tables]
table_schemas = []
for table_name in table_names:
table = next(
(
table
for table in self._metadata.sorted_tables
if table.name == table_name
),
None,
)
if table is None:
raise NoSuchTableError(f"Table '{table_name}' does not exist.")
schema = str(CreateTable(table).compile(self.sql_database._engine))
table_schemas.append(f"{schema}\n")
return "\n".join(table_schemas)
|
"""Database Tool."""
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.utilities.sql_wrapper import SQLDatabase
from sqlalchemy import MetaData, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.schema import CreateTable
class DatabaseToolSpec(BaseToolSpec, BaseReader):
"""Simple Database tool.
    Concatenates each row into a Document used by LlamaIndex.
Args:
sql_database (Optional[SQLDatabase]): SQL database to use,
including table names to specify.
See :ref:`Ref-Struct-Store` for more details.
OR
engine (Optional[Engine]): SQLAlchemy Engine object of the database connection.
OR
uri (Optional[str]): uri of the database connection.
OR
scheme (Optional[str]): scheme of the database connection.
host (Optional[str]): host of the database connection.
port (Optional[int]): port of the database connection.
user (Optional[str]): user of the database connection.
password (Optional[str]): password of the database connection.
dbname (Optional[str]): dbname of the database connection.
"""
spec_functions = ["load_data", "describe_tables", "list_tables"]
def __init__(
self,
sql_database: Optional[SQLDatabase] = None,
engine: Optional[Engine] = None,
uri: Optional[str] = None,
scheme: Optional[str] = None,
host: Optional[str] = None,
port: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
dbname: Optional[str] = None,
*args: Optional[Any],
**kwargs: Optional[Any],
) -> None:
"""Initialize with parameters."""
if sql_database:
self.sql_database = sql_database
elif engine:
self.sql_database = SQLDatabase(engine, *args, **kwargs)
elif uri:
self.uri = uri
self.sql_database = SQLDatabase.from_uri(uri, *args, **kwargs)
elif scheme and host and port and user and password and dbname:
uri = f"{scheme}://{user}:{password}@{host}:{port}/{dbname}"
self.uri = uri
self.sql_database = SQLDatabase.from_uri(uri, *args, **kwargs)
else:
raise ValueError(
"You must provide either a SQLDatabase, "
"a SQL Alchemy Engine, a valid connection URI, or a valid "
"set of credentials."
)
self._metadata = MetaData()
self._metadata.reflect(bind=self.sql_database.engine)
def load_data(self, query: str) -> List[Document]:
"""Query and load data from the Database, returning a list of Documents.
Args:
query (str): an SQL query to filter tables and rows.
Returns:
List[Document]: A list of Document objects.
"""
documents = []
with self.sql_database.engine.connect() as connection:
if query is None:
raise ValueError("A query parameter is necessary to filter the data")
else:
result = connection.execute(text(query))
for item in result.fetchall():
# fetch each item
doc_str = ", ".join([str(entry) for entry in item])
documents.append(Document(text=doc_str))
return documents
def list_tables(self) -> List[str]:
"""
Returns a list of available tables in the database.
To retrieve details about the columns of specific tables, use
the describe_tables endpoint.
"""
return [x.name for x in self._metadata.sorted_tables]
def describe_tables(self, tables: Optional[List[str]] = None) -> str:
"""
Describes the specified tables in the database.
Args:
tables (List[str]): A list of table names to retrieve details about
"""
table_names = tables or [table.name for table in self._metadata.sorted_tables]
table_schemas = []
for table_name in table_names:
table = next(
(
table
for table in self._metadata.sorted_tables
if table.name == table_name
),
None,
)
if table is None:
raise NoSuchTableError(f"Table '{table_name}' does not exist.")
schema = str(CreateTable(table).compile(self.sql_database._engine))
table_schemas.append(f"{schema}\n")
return "\n".join(table_schemas)
|
"""Module for helper functions for clients."""
from typing import Tuple, Optional
from docarray import Document, DocumentArray
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
batch, data_type: DataInputType, endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(endpoint: str, target: Optional[str], parameters: Optional[dict]) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return Document.from_dict(data), DataInputType.DICT
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if incoming is already primitive type Document, then all good, best practice!
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return Document.from_dict(data), DataInputType.DICT
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
da = DocumentArray()
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.data.docs = da
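# A small sketch of how the helpers above fit together (the endpoint name and
# payload are illustrative assumptions):
def _request_demo():  # pragma: no cover - illustrative only
    batch = [Document(text='hello'), Document(text='world')]
    req = _new_data_request_from_batch(
        batch,
        DataInputType.DOCUMENT,
        endpoint='/index',
        target=None,
        parameters={'top_k': 3},
    )
    print(req.header.exec_endpoint, len(req.data.docs))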
|
"""Module for helper functions for clients."""
from typing import Tuple
from docarray import Document, DocumentArray
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
_kwargs, batch, data_type, endpoint, target, parameters
):
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type, _kwargs)
return req
def _new_data_request(endpoint, target, parameters):
req = DataRequest()
# set up header
if endpoint:
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType, **kwargs
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data, **kwargs), DataInputType.CONTENT
if data_type == DataInputType.DICT:
doc = Document.from_dict(data)
return doc, DataInputType.DICT
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if incoming is already primitive type Document, then all good, best practice!
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return Document.from_dict(data), DataInputType.DICT
try:
d = Document(data, **kwargs)
return d, DataInputType.DOCUMENT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req, batch, data_type, _kwargs):
da = DocumentArray()
for content in batch:
if isinstance(content, tuple) and len(content) == 2:
d, data_type = _new_doc_from_data(content[0], data_type, **_kwargs)
da.append(d)
else:
d, data_type = _new_doc_from_data(content, data_type, **_kwargs)
da.append(d)
req.data.docs = da
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"""
Subclass of NdArray, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageNdArray, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageNdArray]
url: Optional[ImageUrl]
bytes: Optional[bytes]
# from url
doc = MyImageDoc(
title='my_second_audio_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.ndarray import NdArray
MAX_INT_16 = 2**15
@_register_proto(proto_type_name='image_ndarray')
class ImageNdArray(AbstractImageTensor, NdArray):
"""
Subclass of NdArray, to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to image bytes, which are
    optimized to send over the wire.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageNdArray, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageNdArray]
url: Optional[ImageUrl]
bytes: Optional[bytes]
# from url
doc = MyImageDoc(
title='my_second_audio_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
"""
...
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocList[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocList[ImageDoc]([ImageDoc(id=str(i)) for i in range(N_DOCS)])
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocList) -> DocList:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray] = None
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocList[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
docs=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocList[MyImage])
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocList[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocList[ImageDoc]([ImageDoc(id=str(i)) for i in range(N_DOCS)])
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocList) -> DocList:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray] = None
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocList[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
docs=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocList[MyImage])
|
from langchain_core.exceptions import TracerException
from langchain_core.tracers.base import BaseTracer
__all__ = ["BaseTracer", "TracerException"]
|
from langchain_core.exceptions import TracerException
from langchain_core.tracers.base import BaseTracer
__all__ = ["TracerException", "BaseTracer"]
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.12.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 4
output_dir = "output/training_ce_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderCorrelationEvaluator to measure the performance on the dev set
eval_evaluator = CrossEncoderCorrelationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
scores=eval_dataset["score"],
name="stsb-validation",
)
eval_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-stsb"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=80,
save_strategy="steps",
save_steps=80,
save_total_limit=2,
logging_steps=20,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=eval_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_evaluator = CrossEncoderCorrelationEvaluator(
sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
scores=test_dataset["score"],
name="stsb-test",
)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 4
output_dir = "output/training_ce_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CECorrelationEvaluator to measure the performance on the dev set
eval_evaluator = CECorrelationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
scores=eval_dataset["score"],
name="stsb-validation",
)
eval_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-stsb"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=80,
save_strategy="steps",
save_steps=80,
save_total_limit=2,
logging_steps=20,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=eval_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_evaluator = CECorrelationEvaluator(
sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
scores=test_dataset["score"],
name="stsb-test",
)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""Wikipedia tool spec."""
from typing import Any, Dict
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class WikipediaToolSpec(BaseToolSpec):
"""
Specifies two tools for querying information from Wikipedia.
"""
spec_functions = ["load_data", "search_data"]
def load_data(
self, page: str, lang: str = "en", **load_kwargs: Dict[str, Any]
) -> str:
"""
Retrieve a Wikipedia page. Useful for learning about a particular concept that isn't private information.
Args:
page (str): Title of the page to read.
lang (str): Language of Wikipedia to read. (default: English)
"""
import wikipedia
wikipedia.set_lang(lang)
try:
wikipedia_page = wikipedia.page(page, **load_kwargs, auto_suggest=False)
except wikipedia.PageError:
return "Unable to load page. Try searching instead."
return wikipedia_page.content
def search_data(self, query: str, lang: str = "en") -> str:
"""
Search Wikipedia for a page related to the given query.
Use this tool when `load_data` returns no results.
Args:
query (str): the string to search for
"""
import wikipedia
pages = wikipedia.search(query)
if len(pages) == 0:
return "No search results."
return self.load_data(pages[0], lang)
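# A minimal usage sketch (requires the ``wikipedia`` package and network access;
# the query string is an illustrative assumption):
def _wikipedia_tool_demo():  # pragma: no cover - illustrative only
    tool_spec = WikipediaToolSpec()
    print(tool_spec.search_data("Python (programming language)")[:300])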
|
"""Wikipedia tool spec."""
from typing import Any, Dict
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class WikipediaToolSpec(BaseToolSpec):
"""
Specifies two tools for querying information from Wikipedia.
"""
spec_functions = ["load_data", "search_data"]
def load_data(
self, page: str, lang: str = "en", **load_kwargs: Dict[str, Any]
) -> str:
"""
Retrieve a Wikipedia page. Useful for learning about a particular concept that isn't private information.
Args:
page (str): Title of the page to read.
lang (str): Language of Wikipedia to read. (default: English)
"""
import wikipedia
wikipedia.set_lang(lang)
try:
wikipedia_page = wikipedia.page(page, **load_kwargs, auto_suggest=False)
except wikipedia.PageError:
return "Unable to load page. Try searching instead."
return wikipedia_page.content
def search_data(self, query: str, lang: str = "en") -> str:
"""
Search Wikipedia for a page related to the given query.
Use this tool when `load_data` returns no results.
Args:
query (str): the string to search for
"""
import wikipedia
pages = wikipedia.search(query)
if len(pages) == 0:
return "No search results."
return self.load_data(pages[0], lang)
|
from typing import List, Optional
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):  # todo: change
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
|
from typing import List, Optional
from docarray.base_doc.doc import BaseDoc
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url):
mesh = Mesh3D(url=file_url)
mesh.vertices, mesh.faces = mesh.url.load()
assert isinstance(mesh.vertices, np.ndarray)
assert isinstance(mesh.faces, np.ndarray)
def test_str_init():
t = parse_obj_as(Mesh3D, 'http://hello.ply')
assert t.url == 'http://hello.ply'
def test_doc():
class MyDoc(BaseDocument):
mesh1: Mesh3D
mesh2: Mesh3D
doc = MyDoc(mesh1='http://hello.ply', mesh2=Mesh3D(url='http://hello.ply'))
assert doc.mesh1.url == 'http://hello.ply'
assert doc.mesh2.url == 'http://hello.ply'
|
import numpy as np
import pytest
from docarray.documents import Mesh3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_mesh(file_url):
mesh = Mesh3D(url=file_url)
mesh.vertices, mesh.faces = mesh.url.load()
assert isinstance(mesh.vertices, np.ndarray)
assert isinstance(mesh.faces, np.ndarray)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
from jina_commons import get_logger
try:
from jinahub.indexers.searcher.FaissSearcher import FaissSearcher
except:
from jina_executors.indexers.searcher.FaissSearcher.faiss_searcher import FaissSearcher
try:
from jinahub.indexers.storage.PostgreSQLStorage import (
PostgreSQLStorage,
)
except:
from jina_executors.indexers.storage.PostgreSQLStorage import (
PostgreSQLStorage,
)
class FaissPostgresSearcher(Executor):
"""A Compound Indexer made up of a FaissSearcher (for vectors) and a Postgres Indexer"""
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
# when constructed from rolling update the dump_path is passed via a runtime_arg
dump_path = dump_path or kwargs.get('runtime_args').get('dump_path')
self.logger = get_logger(self)
self._kv_indexer = None
self._vec_indexer = None
if dump_path:
self._vec_indexer = FaissSearcher(dump_path=dump_path, **kwargs)
self._kv_indexer = PostgreSQLStorage(**kwargs)
else:
self.logger.warning(
f'No dump path provided for {self}. Use .rolling_update() to re-initialize...'
)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
if self._kv_indexer and self._vec_indexer:
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
kv_parameters['traversal_paths'] = [
path + 'm' for path in kv_parameters.get('traversal_paths', ['r'])
]
self._kv_indexer.search(docs, kv_parameters)
else:
return
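# A rough usage sketch (the dump path and the Flow wiring, including ``uses_with``,
# are illustrative assumptions; in practice this executor is deployed inside a Jina Flow):
def _faiss_postgres_demo():  # pragma: no cover - illustrative only
    from jina import Document, Flow

    f = Flow().add(uses=FaissPostgresSearcher, uses_with={'dump_path': '/tmp/dump'})
    with f:
        f.post(on='/search', inputs=[Document() for _ in range(3)])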
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import copy
from typing import Dict
from jina import requests, DocumentArray, Executor
from jina_commons import get_logger
from jinahub.indexers.searcher.FaissSearcher.faiss_searcher import FaissSearcher
from jinahub.indexers.storage.PostgreSQLStorage import PostgreSQLStorage
class FaissPostgresSearcher(Executor):
"""A Compound Indexer made up of a FaissSearcher (for vectors) and a Postgres Indexer"""
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
# when constructed from rolling update the dump_path is passed via a runtime_arg
dump_path = dump_path or kwargs.get('runtime_args').get('dump_path')
self.logger = get_logger(self)
self._kv_indexer = None
self._vec_indexer = None
if dump_path:
self._vec_indexer = FaissSearcher(dump_path=dump_path, **kwargs)
self._kv_indexer = PostgreSQLStorage(**kwargs)
else:
self.logger.warning(
f'No dump path provided for {self}. Use .rolling_update() to re-initialize...'
)
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
if self._kv_indexer and self._vec_indexer:
self._vec_indexer.search(docs, parameters)
kv_parameters = copy.deepcopy(parameters)
kv_parameters['traversal_paths'] = [
path + 'm' for path in kv_parameters.get('traversal_paths', ['r'])
]
self._kv_indexer.search(docs, kv_parameters)
else:
return
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils.misc import is_tf_available, is_torch_available
T = TypeVar('T', bound='ImageDoc')
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
class ImageDoc(BaseDocument):
"""
Document for handling images.
    It can contain an ImageUrl (`ImageDoc.url`), an AnyTensor (`ImageDoc.tensor`),
    and an AnyEmbedding (`ImageDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
            image: ImageDoc
            text: TextDoc
        mmdoc = MultiModalDoc(
            image=ImageDoc(url="http://www.jina.ai/image.jpg"),
            text=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch_available and isinstance(value, torch.Tensor))
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
            value = cls(bytes=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils.misc import is_tf_available, is_torch_available
T = TypeVar('T', bound='Image')
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Image
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch_available and isinstance(value, torch.Tensor))
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
            value = cls(bytes=value)
return super().validate(value)
|
# Copyright (c) OpenMMLab. All rights reserved.
# from mmengine.dist import get_dist_info, all_reduce
from collections import OrderedDict
from typing import Generator, List
from unittest.mock import MagicMock, Mock
import torch
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
from mmengine.registry import HOOKS
from .hook import Hook
# TODO, replace with import mmengine.dist as dist
dist = Mock()
dist.IS_DIST = MagicMock(return_value=True)
# TODO, replace with mmengine.dist.get_dist_info
get_dist_info = MagicMock(return_value=(0, 1))
# TODO, replace with mmengine.dist.all_reduce
all_reduce = MagicMock()
# TODO, may need to move to dist.utils after implementing dist module
def _allreduce_coalesced(tensors: List[torch.Tensor],
world_size: int,
bucket_size_mb: int = -1) -> None:
"""All-reduce a sequence of tensors as a whole.
Args:
tensors (List[torch.Tensor]): A sequence of tensors to be
all-reduced.
world_size (int): The world size of the process group.
bucket_size_mb (int): The limit of each chunk in megabytes
for grouping tensors into chunks. Defaults to -1.
"""
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_params(params: Generator[torch.Tensor, None, None],
coalesce: bool = True,
bucket_size_mb: int = -1) -> None:
"""All-reduce parameters.
Args:
params (Generator[torch.Tensor, None, None]): List of parameters or
buffers of a model.
coalesce (bool, optional): Whether to reduce parameters as a whole.
Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
_, world_size = get_dist_info()
if world_size == 1:
return
params_data = [param.data for param in params]
if coalesce:
_allreduce_coalesced(params_data, world_size, bucket_size_mb)
else:
for tensor in params_data:
all_reduce(tensor.div_(world_size))
@HOOKS.register_module()
class SyncBuffersHook(Hook):
"""Synchronize model buffers such as running_mean and running_var in BN at
the end of each epoch."""
priority = 'NORMAL'
def __init__(self) -> None:
self.distributed = dist.IS_DIST
def after_epoch(self, runner: object) -> None:
"""All-reduce model buffers at the end of each epoch.
Args:
runner (object): The runner of the training process.
"""
if self.distributed:
allreduce_params(runner.model.buffers()) # type: ignore
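# A small sketch of how ``allreduce_params`` is typically called (the model below is
# an illustrative assumption; with the mocked dist helpers above, world_size is 1 and
# the call returns immediately):
def _sync_buffers_demo():  # pragma: no cover - illustrative only
    model = torch.nn.BatchNorm2d(8)
    # synchronise running_mean / running_var across workers after an epoch
    allreduce_params(model.buffers(), coalesce=True, bucket_size_mb=-1)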
|
# Copyright (c) OpenMMLab. All rights reserved.
# from mmengine.dist import get_dist_info, all_reduce
from collections import OrderedDict
from typing import Generator, List
from unittest.mock import MagicMock, Mock
import torch
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
from mmengine.registry import HOOKS
from .hook import Hook
# TODO, replace with import mmengine.dist as dist
dist = Mock()
dist.IS_DIST = MagicMock(return_value=True)
# TODO, replace with mmengine.dist.get_dist_info
get_dist_info = MagicMock(return_value=(0, 1))
# TODO, replace with mmengine.dist.all_reduce
all_reduce = MagicMock()
# TODO, may need to move to dist.utils after implementing dist module
def _allreduce_coalesced(tensors: List[torch.Tensor],
world_size: int,
bucket_size_mb: int = -1) -> None:
"""All-reduce a sequence of tensors as a whole.
Args:
tensors (List[torch.Tensor]): A sequence of tensors to be
all-reduced.
world_size (int): The world size of the process group.
bucket_size_mb (int): The limit of each chunk in megabytes
for grouping tensors into chunks. Defaults to -1.
"""
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_params(params: Generator[torch.Tensor, None, None],
coalesce: bool = True,
bucket_size_mb: int = -1) -> None:
"""All-reduce parameters.
Args:
params (Generator[torch.Tensor, None, None]): List of parameters or
buffers of a model.
coalesce (bool, optional): Whether to reduce parameters as a whole.
Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
_, world_size = get_dist_info()
if world_size == 1:
return
params_data = [param.data for param in params]
if coalesce:
_allreduce_coalesced(params_data, world_size, bucket_size_mb)
else:
for tensor in params_data:
all_reduce(tensor.div_(world_size))
@HOOKS.register_module()
class SyncBuffersHook(Hook):
"""Synchronize model buffers such as running_mean and running_var in BN at
the end of each epoch."""
def __init__(self) -> None:
self.distributed = dist.IS_DIST
def after_epoch(self, runner: object) -> None:
"""All-reduce model buffers at the end of each epoch.
Args:
runner (object): The runner of the training process.
"""
if self.distributed:
allreduce_params(runner.model.buffers()) # type: ignore
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
        # Do some transformations. Here, we're just passing through the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = datapoints.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with datapoint classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`datapoint_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all datapoints are
# tensor-subclasses) plus some custom logic that is out of scope here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
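# %%
# A minimal sketch of that flatten / transform / repack pattern (an
# illustration under stated assumptions, not the actual private
# implementation; it relies on the ``torch.utils._pytree`` helpers and only
# dispatches on the class of each entry):
from torch.utils import _pytree as pytree


class HFlipEverything(torch.nn.Module):
    def forward(self, inputs):
        # unpack the arbitrarily nested input into a flat list + its structure
        flat, spec = pytree.tree_flatten(inputs)
        transformed = [
            v2.functional.horizontal_flip(entry)
            if isinstance(entry, torch.Tensor)  # covers images and datapoints
            else entry  # leave labels, strings, etc. untouched
            for entry in flat
        ]
        # repack the (potentially transformed) entries into the same structure
        return pytree.tree_unflatten(transformed, spec)


print(HFlipEverything()(structured_input)["annotations"][0])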
|
"""
===================================
How to write your own v2 transforms
===================================
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
        # Do some transformations. Here, we're just passing through the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = datapoints.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with datapoint classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`datapoint_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all datapoints are
# tensor-subclasses) plus some custom logic that is out of scope here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
|
# noqa: D300,D400
# Copyright (c) 2016, Aaron Christianson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Monkey patch setuptools to write faster console_scripts with this format:
import sys
from mymodule import entry_function
sys.exit(entry_function())
This is better: it avoids the slow ``pkg_resources`` import that the default
setuptools scripts perform on every start-up.
(c) 2016, Aaron Christianson
http://github.com/ninjaaron/fast-entry_points
'''
from setuptools.command import easy_install
import re
TEMPLATE = r'''
# -*- coding: utf-8 -*-
# EASY-INSTALL-ENTRY-SCRIPT: '{3}','{4}','{5}'
__requires__ = '{3}'
import re
import sys
from {0} import {1}
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit({2}())
'''.lstrip()
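# For contrast, a rough sketch of the script body that stock setuptools
# generates (approximate; the exact text varies with the setuptools version,
# and ``_PKG_RESOURCES_TEMPLATE`` is only an illustrative name, not part of
# the original module). Importing ``pkg_resources`` on every launch is what
# makes those scripts slow to start:
_PKG_RESOURCES_TEMPLATE = r'''
# EASY-INSTALL-ENTRY-SCRIPT: '{0}','{1}','{2}'
__requires__ = '{0}'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('{0}', '{1}', '{2}')()
    )
'''.lstrip()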
@classmethod
def get_args(cls, dist, header=None): # noqa: D205,D400
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
# pylint: disable=E1101
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
# ensure_safe_name
if re.search(r'[\\/]', name):
raise ValueError("Path separators not allowed in script names")
script_text = TEMPLATE.format(
ep.module_name, ep.attrs[0], '.'.join(ep.attrs), spec, group, name
)
# pylint: disable=E1101
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
# pylint: disable=E1101
easy_install.ScriptWriter.get_args = get_args
def main():
import os
import re
import shutil
import sys
dests = sys.argv[1:] or ['.']
filename = re.sub(r'\.pyc$', '.py', __file__)
for dst in dests:
shutil.copy(filename, dst)
manifest_path = os.path.join(dst, 'MANIFEST.in')
setup_path = os.path.join(dst, 'setup.py')
# Insert the include statement to MANIFEST.in if not present
with open(manifest_path, 'a+', encoding='utf-8') as manifest:
manifest.seek(0)
manifest_content = manifest.read()
if 'include fastentrypoints.py' not in manifest_content:
manifest.write(
('\n' if manifest_content else '') + 'include fastentrypoints.py'
)
# Insert the import statement to setup.py if not present
with open(setup_path, 'a+', encoding='utf-8') as setup:
setup.seek(0)
setup_content = setup.read()
if 'import fastentrypoints' not in setup_content:
setup.seek(0)
setup.truncate()
setup.write('import fastentrypoints\n' + setup_content)
|
# noqa: D300,D400
# Copyright (c) 2016, Aaron Christianson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Monkey patch setuptools to write faster console_scripts with this format:
import sys
from mymodule import entry_function
sys.exit(entry_function())
This is better: it avoids the slow ``pkg_resources`` import that the default
setuptools scripts perform on every start-up.
(c) 2016, Aaron Christianson
http://github.com/ninjaaron/fast-entry_points
'''
from setuptools.command import easy_install
import re
TEMPLATE = r'''
# -*- coding: utf-8 -*-
# EASY-INSTALL-ENTRY-SCRIPT: '{3}','{4}','{5}'
__requires__ = '{3}'
import re
import sys
from {0} import {1}
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit({2}())
'''.lstrip()
@classmethod
def get_args(cls, dist, header=None): # noqa: D205,D400
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
# pylint: disable=E1101
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
# ensure_safe_name
if re.search(r'[\\/]', name):
raise ValueError("Path separators not allowed in script names")
script_text = TEMPLATE.format(
ep.module_name, ep.attrs[0], '.'.join(ep.attrs), spec, group, name
)
# pylint: disable=E1101
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
# pylint: disable=E1101
easy_install.ScriptWriter.get_args = get_args
def main():
import os
import re
import shutil
import sys
dests = sys.argv[1:] or ['.']
filename = re.sub(r'\.pyc$', '.py', __file__)
for dst in dests:
shutil.copy(filename, dst)
manifest_path = os.path.join(dst, 'MANIFEST.in')
setup_path = os.path.join(dst, 'setup.py')
# Insert the include statement to MANIFEST.in if not present
with open(manifest_path, 'a+') as manifest:
manifest.seek(0)
manifest_content = manifest.read()
if 'include fastentrypoints.py' not in manifest_content:
manifest.write(
('\n' if manifest_content else '') + 'include fastentrypoints.py'
)
# Insert the import statement to setup.py if not present
with open(setup_path, 'a+') as setup:
setup.seek(0)
setup_content = setup.read()
if 'import fastentrypoints' not in setup_content:
setup.seek(0)
setup.truncate()
setup.write('import fastentrypoints\n' + setup_content)
|
"""
==========================================================
Demonstrating the different strategies of KBinsDiscretizer
==========================================================
This example presents the different strategies implemented in KBinsDiscretizer:
- 'uniform': The discretization is uniform in each feature, which means that
the bin widths are constant in each dimension.
- 'quantile': The discretization is done on the quantile values, which means
that each bin has approximately the same number of samples.
- 'kmeans': The discretization is based on the centroids of a KMeans clustering
procedure.
The plot shows the regions where the discretized encoding is constant.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import KBinsDiscretizer
strategies = ["uniform", "quantile", "kmeans"]
n_samples = 200
centers_0 = np.array([[0, 0], [0, 5], [2, 4], [8, 8]])
centers_1 = np.array([[0, 0], [3, 1]])
# construct the datasets
random_state = 42
X_list = [
np.random.RandomState(random_state).uniform(-3, 3, size=(n_samples, 2)),
make_blobs(
n_samples=[
n_samples // 10,
n_samples * 4 // 10,
n_samples // 10,
n_samples * 4 // 10,
],
cluster_std=0.5,
centers=centers_0,
random_state=random_state,
)[0],
make_blobs(
n_samples=[n_samples // 5, n_samples * 4 // 5],
cluster_std=0.5,
centers=centers_1,
random_state=random_state,
)[0],
]
figure = plt.figure(figsize=(14, 9))
i = 1
for ds_cnt, X in enumerate(X_list):
ax = plt.subplot(len(X_list), len(strategies) + 1, i)
ax.scatter(X[:, 0], X[:, 1], edgecolors="k")
if ds_cnt == 0:
ax.set_title("Input data", size=14)
xx, yy = np.meshgrid(
np.linspace(X[:, 0].min(), X[:, 0].max(), 300),
np.linspace(X[:, 1].min(), X[:, 1].max(), 300),
)
grid = np.c_[xx.ravel(), yy.ravel()]
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# transform the dataset with KBinsDiscretizer
for strategy in strategies:
enc = KBinsDiscretizer(
n_bins=4,
encode="ordinal",
quantile_method="averaged_inverted_cdf",
strategy=strategy,
)
enc.fit(X)
grid_encoded = enc.transform(grid)
ax = plt.subplot(len(X_list), len(strategies) + 1, i)
# horizontal stripes
horizontal = grid_encoded[:, 0].reshape(xx.shape)
ax.contourf(xx, yy, horizontal, alpha=0.5)
# vertical stripes
vertical = grid_encoded[:, 1].reshape(xx.shape)
ax.contourf(xx, yy, vertical, alpha=0.5)
ax.scatter(X[:, 0], X[:, 1], edgecolors="k")
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title("strategy='%s'" % (strategy,), size=14)
i += 1
plt.tight_layout()
plt.show()
|
"""
==========================================================
Demonstrating the different strategies of KBinsDiscretizer
==========================================================
This example presents the different strategies implemented in KBinsDiscretizer:
- 'uniform': The discretization is uniform in each feature, which means that
the bin widths are constant in each dimension.
- 'quantile': The discretization is done on the quantile values, which means
that each bin has approximately the same number of samples.
- 'kmeans': The discretization is based on the centroids of a KMeans clustering
procedure.
The plot shows the regions where the discretized encoding is constant.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import KBinsDiscretizer
strategies = ["uniform", "quantile", "kmeans"]
n_samples = 200
centers_0 = np.array([[0, 0], [0, 5], [2, 4], [8, 8]])
centers_1 = np.array([[0, 0], [3, 1]])
# construct the datasets
random_state = 42
X_list = [
np.random.RandomState(random_state).uniform(-3, 3, size=(n_samples, 2)),
make_blobs(
n_samples=[
n_samples // 10,
n_samples * 4 // 10,
n_samples // 10,
n_samples * 4 // 10,
],
cluster_std=0.5,
centers=centers_0,
random_state=random_state,
)[0],
make_blobs(
n_samples=[n_samples // 5, n_samples * 4 // 5],
cluster_std=0.5,
centers=centers_1,
random_state=random_state,
)[0],
]
figure = plt.figure(figsize=(14, 9))
i = 1
for ds_cnt, X in enumerate(X_list):
ax = plt.subplot(len(X_list), len(strategies) + 1, i)
ax.scatter(X[:, 0], X[:, 1], edgecolors="k")
if ds_cnt == 0:
ax.set_title("Input data", size=14)
xx, yy = np.meshgrid(
np.linspace(X[:, 0].min(), X[:, 0].max(), 300),
np.linspace(X[:, 1].min(), X[:, 1].max(), 300),
)
grid = np.c_[xx.ravel(), yy.ravel()]
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# transform the dataset with KBinsDiscretizer
for strategy in strategies:
enc = KBinsDiscretizer(n_bins=4, encode="ordinal", strategy=strategy)
enc.fit(X)
grid_encoded = enc.transform(grid)
ax = plt.subplot(len(X_list), len(strategies) + 1, i)
# horizontal stripes
horizontal = grid_encoded[:, 0].reshape(xx.shape)
ax.contourf(xx, yy, horizontal, alpha=0.5)
# vertical stripes
vertical = grid_encoded[:, 1].reshape(xx.shape)
ax.contourf(xx, yy, vertical, alpha=0.5)
ax.scatter(X[:, 0], X[:, 1], edgecolors="k")
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title("strategy='%s'" % (strategy,), size=14)
i += 1
plt.tight_layout()
plt.show()
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
        # Do some transformations. Here, we're just passing through the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = tv_tensors.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with tv_tensor classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`tv_tensor_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all tv_tensors are
# tensor-subclasses) plus some custom logic that is out of scope here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
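# %%
# A minimal sketch of that flatten / transform / repack pattern (an
# illustration under stated assumptions, not the actual private
# implementation; it relies on the ``torch.utils._pytree`` helpers and only
# dispatches on the class of each entry):
from torch.utils import _pytree as pytree


class HFlipEverything(torch.nn.Module):
    def forward(self, inputs):
        # unpack the arbitrarily nested input into a flat list + its structure
        flat, spec = pytree.tree_flatten(inputs)
        transformed = [
            v2.functional.horizontal_flip(entry)
            if isinstance(entry, torch.Tensor)  # covers images and tv_tensors
            else entry  # leave labels, strings, etc. untouched
            for entry in flat
        ]
        # repack the (potentially transformed) entries into the same structure
        return pytree.tree_unflatten(transformed, spec)


print(HFlipEverything()(structured_input)["annotations"][0])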
|
"""
===================================
How to write your own v2 transforms
===================================
.. note::
    Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/main/_generated_ipynb_notebooks/plot_custom_transforms.ipynb>`_
or :ref:`go to the end <sphx_glr_download_auto_examples_transforms_plot_custom_transforms.py>` to download the full example code.
This guide explains how to write transforms that are compatible with the
torchvision transforms V2 API.
"""
# %%
import torch
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# Just create a ``nn.Module`` and override the ``forward`` method
# ===============================================================
#
# In most cases, this is all you're going to need, as long as you already know
# the structure of the input that your transform will expect. For example if
# you're just doing image classification, your transform will typically accept a
# single image as input, or a ``(img, label)`` input. So you can just hard-code
# your ``forward`` method to accept just that, e.g.
#
# .. code:: python
#
# class MyCustomTransform(torch.nn.Module):
# def forward(self, img, label):
# # Do some transformations
# return new_img, new_label
#
# .. note::
#
# This means that if you have a custom transform that is already compatible
# with the V1 transforms (those in ``torchvision.transforms``), it will
# still work with the V2 transforms without any change!
#
# We will illustrate this more completely below with a typical detection case,
# where our samples are just images, bounding boxes and labels:
class MyCustomTransform(torch.nn.Module):
def forward(self, img, bboxes, label): # we assume inputs are always structured like this
print(
f"I'm transforming an image of shape {img.shape} "
f"with bboxes = {bboxes}\n{label = }"
)
        # Do some transformations. Here, we're just passing through the input
return img, bboxes, label
transforms = v2.Compose([
MyCustomTransform(),
v2.RandomResizedCrop((224, 224), antialias=True),
v2.RandomHorizontalFlip(p=1),
v2.Normalize(mean=[0, 0, 0], std=[1, 1, 1])
])
H, W = 256, 256
img = torch.rand(3, H, W)
bboxes = datapoints.BoundingBoxes(
torch.tensor([[0, 10, 10, 20], [50, 50, 70, 70]]),
format="XYXY",
canvas_size=(H, W)
)
label = 3
out_img, out_bboxes, out_label = transforms(img, bboxes, label)
# %%
print(f"Output image shape: {out_img.shape}\nout_bboxes = {out_bboxes}\n{out_label = }")
# %%
# .. note::
# While working with datapoint classes in your code, make sure to
# familiarize yourself with this section:
# :ref:`datapoint_unwrapping_behaviour`
#
# Supporting arbitrary input structures
# =====================================
#
# In the section above, we have assumed that you already know the structure of
# your inputs and that you're OK with hard-coding this expected structure in
# your code. If you want your custom transforms to be as flexible as possible,
# this can be a bit limiting.
#
# A key feature of the builtin Torchvision V2 transforms is that they can accept
# arbitrary input structure and return the same structure as output (with
# transformed entries). For example, transforms can accept a single image, or a
# tuple of ``(img, label)``, or an arbitrary nested dictionary as input:
structured_input = {
"img": img,
"annotations": (bboxes, label),
"something_that_will_be_ignored": (1, "hello")
}
structured_output = v2.RandomHorizontalFlip(p=1)(structured_input)
assert isinstance(structured_output, dict)
assert structured_output["something_that_will_be_ignored"] == (1, "hello")
print(f"The transformed bboxes are:\n{structured_output['annotations'][0]}")
# %%
# If you want to reproduce this behavior in your own transform, we invite you to
# look at our `code
# <https://github.com/pytorch/vision/blob/main/torchvision/transforms/v2/_transform.py>`_
# and adapt it to your needs.
#
# In brief, the core logic is to unpack the input into a flat list using `pytree
# <https://github.com/pytorch/pytorch/blob/main/torch/utils/_pytree.py>`_, and
# then transform only the entries that can be transformed (the decision is made
# based on the **class** of the entries, as all datapoints are
# tensor-subclasses) plus some custom logic that is out of scope here - check the
# code for details. The (potentially transformed) entries are then repacked and
# returned, in the same structure as the input.
#
# We do not provide public dev-facing tools to achieve that at this time, but if
# this is something that would be valuable to you, please let us know by opening
# an issue on our `GitHub repo <https://github.com/pytorch/vision/issues>`_.
|
class AudioMetaData:
"""AudioMetaData()
Return type of ``torchaudio.info`` function.
:ivar int sample_rate: Sample rate
:ivar int num_frames: The number of frames
:ivar int num_channels: The number of channels
:ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,
or when it cannot be accurately inferred.
:ivar str encoding: Audio encoding
        ``encoding`` can take one of the following values:
* ``PCM_S``: Signed integer linear PCM
* ``PCM_U``: Unsigned integer linear PCM
* ``PCM_F``: Floating point linear PCM
* ``FLAC``: Flac, Free Lossless Audio Codec
* ``ULAW``: Mu-law
* ``ALAW``: A-law
* ``MP3`` : MP3, MPEG-1 Audio Layer III
* ``VORBIS``: OGG Vorbis
* ``AMR_WB``: Adaptive Multi-Rate Wideband
* ``AMR_NB``: Adaptive Multi-Rate Narrowband
* ``OPUS``: Opus
* ``HTK``: Single channel 16-bit PCM
        * ``UNKNOWN`` : None of the above
"""
def __init__(
self,
sample_rate: int,
num_frames: int,
num_channels: int,
bits_per_sample: int,
encoding: str,
):
self.sample_rate = sample_rate
self.num_frames = num_frames
self.num_channels = num_channels
self.bits_per_sample = bits_per_sample
self.encoding = encoding
def __str__(self):
return (
f"AudioMetaData("
f"sample_rate={self.sample_rate}, "
f"num_frames={self.num_frames}, "
f"num_channels={self.num_channels}, "
f"bits_per_sample={self.bits_per_sample}, "
f"encoding={self.encoding}"
f")"
)
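# A minimal usage sketch (hypothetical values; normally an AudioMetaData
# instance is produced by ``torchaudio.info`` rather than built by hand):
if __name__ == "__main__":
    meta = AudioMetaData(
        sample_rate=16000,
        num_frames=16000 * 2,  # two seconds of audio
        num_channels=1,
        bits_per_sample=16,
        encoding="PCM_S",
    )
    print(meta)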
|
class AudioMetaData:
"""Return type of ``torchaudio.info`` function.
This class is used by :py:mod:`"sox_io" backend<torchaudio.backends.sox_io_backend>` and
:py:mod:`"soundfile" backend<torchaudio.backends.soundfile_backend>`.
:ivar int sample_rate: Sample rate
:ivar int num_frames: The number of frames
:ivar int num_channels: The number of channels
:ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,
or when it cannot be accurately inferred.
:ivar str encoding: Audio encoding
        ``encoding`` can take one of the following values:
* ``PCM_S``: Signed integer linear PCM
* ``PCM_U``: Unsigned integer linear PCM
* ``PCM_F``: Floating point linear PCM
* ``FLAC``: Flac, Free Lossless Audio Codec
* ``ULAW``: Mu-law
* ``ALAW``: A-law
* ``MP3`` : MP3, MPEG-1 Audio Layer III
* ``VORBIS``: OGG Vorbis
* ``AMR_WB``: Adaptive Multi-Rate Wideband
* ``AMR_NB``: Adaptive Multi-Rate Narrowband
* ``OPUS``: Opus
* ``HTK``: Single channel 16-bit PCM
        * ``UNKNOWN`` : None of the above
"""
def __init__(
self,
sample_rate: int,
num_frames: int,
num_channels: int,
bits_per_sample: int,
encoding: str,
):
self.sample_rate = sample_rate
self.num_frames = num_frames
self.num_channels = num_channels
self.bits_per_sample = bits_per_sample
self.encoding = encoding
def __str__(self):
return (
f"AudioMetaData("
f"sample_rate={self.sample_rate}, "
f"num_frames={self.num_frames}, "
f"num_channels={self.num_channels}, "
f"bits_per_sample={self.bits_per_sample}, "
f"encoding={self.encoding}"
f")"
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import (
SingleStoreDBChatMessageHistory,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SingleStoreDBChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SingleStoreDBChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import (
SingleStoreDBChatMessageHistory,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SingleStoreDBChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SingleStoreDBChatMessageHistory",
]
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import raise_on_run_directly
from torch.testing._internal.jit_utils import JitTestCase
class TestIgnoreContextManager(JitTestCase):
def test_with_ignore_context_manager_with_inp_out(self):
class A(torch.nn.Module):
def forward(self):
a: int = 4
b: int = 5
c: int = 0
d: int = 6
with torch.jit._IgnoreContextManager(
a="inp:int", b="inp:int", c="out:int", d="out:int"
):
l = [2 for i in range(a) if i > 2]
c = l[0] + a + b
d = 9
return c + d
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), model())
self.assertEqual(s(), 20)
class B(torch.nn.Module):
def forward(self):
a: int = 4
b: int = 5
c: int = 0
with torch.jit._IgnoreContextManager(
a="inp:int", b="inp:int", c="out:int"
):
l = [2 for i in range(a) if i > 2]
c = l[0] + a + b
return c
model = B()
s = torch.jit.script(model)
self.assertEqual(s(), 11)
self.assertEqual(s(), model())
class C(torch.nn.Module):
def forward(self):
a: int = 4
b: int = 5
with torch.jit._IgnoreContextManager(a="inp:int", b="out:int"):
l = [2 for i in range(a) if i > 2]
b = l[0] + a
return b
model = C()
s = torch.jit.script(model)
self.assertEqual(s(), 6)
self.assertEqual(s(), model())
def test_with_ignore_context_manager_with_just_inp(self):
class A(torch.nn.Module):
def forward(self):
a: int = 4
b: int = 5
with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int"):
l = [2 + b for i in range(a) if i > 2] # noqa: F841
return a
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), 4)
self.assertEqual(s(), model())
def test_with_ignore_context_manager_with_just_out(self):
class A(torch.nn.Module):
def forward(self):
with torch.jit._IgnoreContextManager(c="out:List[int]"):
c = [2 for i in range(7) if i > 2]
c[0] = 3
return c[0] + c[1]
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), 5)
self.assertEqual(s(), model())
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.jit.frontend import _IS_ASTUNPARSE_INSTALLED
from torch.testing._internal.common_utils import raise_on_run_directly
from torch.testing._internal.jit_utils import JitTestCase
class TestIgnoreContextManager(JitTestCase):
@unittest.skipUnless(_IS_ASTUNPARSE_INSTALLED, "astunparse package is required")
def test_with_ignore_context_manager_with_inp_out(self):
class A(torch.nn.Module):
def forward(self):
a: int = 4
b: int = 5
c: int = 0
d: int = 6
with torch.jit._IgnoreContextManager(
a="inp:int", b="inp:int", c="out:int", d="out:int"
):
l = [2 for i in range(a) if i > 2]
c = l[0] + a + b
d = 9
return c + d
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), model())
self.assertEqual(s(), 20)
class B(torch.nn.Module):
def forward(self):
a: int = 4
b: int = 5
c: int = 0
with torch.jit._IgnoreContextManager(
a="inp:int", b="inp:int", c="out:int"
):
l = [2 for i in range(a) if i > 2]
c = l[0] + a + b
return c
model = B()
s = torch.jit.script(model)
self.assertEqual(s(), 11)
self.assertEqual(s(), model())
class C(torch.nn.Module):
def forward(self):
a: int = 4
b: int = 5
with torch.jit._IgnoreContextManager(a="inp:int", b="out:int"):
l = [2 for i in range(a) if i > 2]
b = l[0] + a
return b
model = C()
s = torch.jit.script(model)
self.assertEqual(s(), 6)
self.assertEqual(s(), model())
@unittest.skipUnless(_IS_ASTUNPARSE_INSTALLED, "astunparse package is required")
def test_with_ignore_context_manager_with_just_inp(self):
class A(torch.nn.Module):
def forward(self):
a: int = 4
b: int = 5
with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int"):
l = [2 + b for i in range(a) if i > 2] # noqa: F841
return a
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), 4)
self.assertEqual(s(), model())
@unittest.skipUnless(_IS_ASTUNPARSE_INSTALLED, "astunparse package is required")
def test_with_ignore_context_manager_with_just_out(self):
class A(torch.nn.Module):
def forward(self):
with torch.jit._IgnoreContextManager(c="out:List[int]"):
c = [2 for i in range(7) if i > 2]
c[0] = 3
return c[0] + c[1]
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), 5)
self.assertEqual(s(), model())
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
|
"""Init file of LlamaIndex."""
__version__ = "0.12.14"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.13"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Test loaders for common functionality."""
import inspect
import os
import numpy as np
import pytest
import sklearn.datasets
def is_pillow_installed():
try:
import PIL # noqa: F401
return True
except ImportError:
return False
FETCH_PYTEST_MARKERS = {
"return_X_y": {
"fetch_20newsgroups": pytest.mark.xfail(
reason="X is a list and does not have a shape argument"
),
"fetch_openml": pytest.mark.xfail(
reason="fetch_opeml requires a dataset name or id"
),
"fetch_lfw_people": pytest.mark.skipif(
not is_pillow_installed(), reason="pillow is not installed"
),
},
"as_frame": {
"fetch_openml": pytest.mark.xfail(
reason="fetch_opeml requires a dataset name or id"
),
},
}
def check_pandas_dependency_message(fetch_func):
try:
import pandas # noqa: F401
pytest.skip("This test requires pandas to not be installed")
except ImportError:
# Check that pandas is imported lazily and that an informative error
# message is raised when pandas is missing:
name = fetch_func.__name__
expected_msg = f"{name} with as_frame=True requires pandas"
with pytest.raises(ImportError, match=expected_msg):
fetch_func(as_frame=True)
def check_return_X_y(bunch, dataset_func):
X_y_tuple = dataset_func(return_X_y=True)
assert isinstance(X_y_tuple, tuple)
assert X_y_tuple[0].shape == bunch.data.shape
assert X_y_tuple[1].shape == bunch.target.shape
def check_as_frame(
bunch, dataset_func, expected_data_dtype=None, expected_target_dtype=None
):
pd = pytest.importorskip("pandas")
frame_bunch = dataset_func(as_frame=True)
assert hasattr(frame_bunch, "frame")
assert isinstance(frame_bunch.frame, pd.DataFrame)
assert isinstance(frame_bunch.data, pd.DataFrame)
assert frame_bunch.data.shape == bunch.data.shape
if frame_bunch.target.ndim > 1:
assert isinstance(frame_bunch.target, pd.DataFrame)
else:
assert isinstance(frame_bunch.target, pd.Series)
assert frame_bunch.target.shape[0] == bunch.target.shape[0]
if expected_data_dtype is not None:
assert np.all(frame_bunch.data.dtypes == expected_data_dtype)
if expected_target_dtype is not None:
assert np.all(frame_bunch.target.dtypes == expected_target_dtype)
# Test for return_X_y and as_frame=True
frame_X, frame_y = dataset_func(as_frame=True, return_X_y=True)
assert isinstance(frame_X, pd.DataFrame)
if frame_y.ndim > 1:
        assert isinstance(frame_y, pd.DataFrame)
else:
assert isinstance(frame_y, pd.Series)
def _skip_network_tests():
return os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "1"
def _generate_func_supporting_param(param, dataset_type=("load", "fetch")):
markers_fetch = FETCH_PYTEST_MARKERS.get(param, {})
for name, obj in inspect.getmembers(sklearn.datasets):
if not inspect.isfunction(obj):
continue
is_dataset_type = any([name.startswith(t) for t in dataset_type])
is_support_param = param in inspect.signature(obj).parameters
if is_dataset_type and is_support_param:
# check if we should skip if we don't have network support
marks = [
pytest.mark.skipif(
condition=name.startswith("fetch") and _skip_network_tests(),
reason="Skip because fetcher requires internet network",
)
]
if name in markers_fetch:
marks.append(markers_fetch[name])
yield pytest.param(name, obj, marks=marks)
@pytest.mark.parametrize(
"name, dataset_func", _generate_func_supporting_param("return_X_y")
)
def test_common_check_return_X_y(name, dataset_func):
bunch = dataset_func()
check_return_X_y(bunch, dataset_func)
@pytest.mark.parametrize(
"name, dataset_func", _generate_func_supporting_param("as_frame")
)
def test_common_check_as_frame(name, dataset_func):
bunch = dataset_func()
check_as_frame(bunch, dataset_func)
@pytest.mark.parametrize(
"name, dataset_func", _generate_func_supporting_param("as_frame")
)
def test_common_check_pandas_dependency(name, dataset_func):
check_pandas_dependency_message(dataset_func)
|
"""Test loaders for common functionality."""
import inspect
import os
import numpy as np
import pytest
import sklearn.datasets
def is_pillow_installed():
try:
import PIL # noqa
return True
except ImportError:
return False
FETCH_PYTEST_MARKERS = {
"return_X_y": {
"fetch_20newsgroups": pytest.mark.xfail(
reason="X is a list and does not have a shape argument"
),
"fetch_openml": pytest.mark.xfail(
reason="fetch_opeml requires a dataset name or id"
),
"fetch_lfw_people": pytest.mark.skipif(
not is_pillow_installed(), reason="pillow is not installed"
),
},
"as_frame": {
"fetch_openml": pytest.mark.xfail(
reason="fetch_opeml requires a dataset name or id"
),
},
}
def check_pandas_dependency_message(fetch_func):
try:
import pandas # noqa
pytest.skip("This test requires pandas to not be installed")
except ImportError:
# Check that pandas is imported lazily and that an informative error
# message is raised when pandas is missing:
name = fetch_func.__name__
expected_msg = f"{name} with as_frame=True requires pandas"
with pytest.raises(ImportError, match=expected_msg):
fetch_func(as_frame=True)
def check_return_X_y(bunch, dataset_func):
X_y_tuple = dataset_func(return_X_y=True)
assert isinstance(X_y_tuple, tuple)
assert X_y_tuple[0].shape == bunch.data.shape
assert X_y_tuple[1].shape == bunch.target.shape
def check_as_frame(
bunch, dataset_func, expected_data_dtype=None, expected_target_dtype=None
):
pd = pytest.importorskip("pandas")
frame_bunch = dataset_func(as_frame=True)
assert hasattr(frame_bunch, "frame")
assert isinstance(frame_bunch.frame, pd.DataFrame)
assert isinstance(frame_bunch.data, pd.DataFrame)
assert frame_bunch.data.shape == bunch.data.shape
if frame_bunch.target.ndim > 1:
assert isinstance(frame_bunch.target, pd.DataFrame)
else:
assert isinstance(frame_bunch.target, pd.Series)
assert frame_bunch.target.shape[0] == bunch.target.shape[0]
if expected_data_dtype is not None:
assert np.all(frame_bunch.data.dtypes == expected_data_dtype)
if expected_target_dtype is not None:
assert np.all(frame_bunch.target.dtypes == expected_target_dtype)
# Test for return_X_y and as_frame=True
frame_X, frame_y = dataset_func(as_frame=True, return_X_y=True)
assert isinstance(frame_X, pd.DataFrame)
if frame_y.ndim > 1:
        assert isinstance(frame_y, pd.DataFrame)
else:
assert isinstance(frame_y, pd.Series)
def _skip_network_tests():
return os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "1"
def _generate_func_supporting_param(param, dataset_type=("load", "fetch")):
markers_fetch = FETCH_PYTEST_MARKERS.get(param, {})
for name, obj in inspect.getmembers(sklearn.datasets):
if not inspect.isfunction(obj):
continue
is_dataset_type = any([name.startswith(t) for t in dataset_type])
is_support_param = param in inspect.signature(obj).parameters
if is_dataset_type and is_support_param:
# check if we should skip if we don't have network support
marks = [
pytest.mark.skipif(
condition=name.startswith("fetch") and _skip_network_tests(),
reason="Skip because fetcher requires internet network",
)
]
if name in markers_fetch:
marks.append(markers_fetch[name])
yield pytest.param(name, obj, marks=marks)
@pytest.mark.parametrize(
"name, dataset_func", _generate_func_supporting_param("return_X_y")
)
def test_common_check_return_X_y(name, dataset_func):
bunch = dataset_func()
check_return_X_y(bunch, dataset_func)
@pytest.mark.parametrize(
"name, dataset_func", _generate_func_supporting_param("as_frame")
)
def test_common_check_as_frame(name, dataset_func):
bunch = dataset_func()
check_as_frame(bunch, dataset_func)
@pytest.mark.parametrize(
"name, dataset_func", _generate_func_supporting_param("as_frame")
)
def test_common_check_pandas_dependency(name, dataset_func):
check_pandas_dependency_message(dataset_func)
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.prototype.transforms import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.prototype.transforms import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
|
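A hedged usage sketch of the Mask datapoint defined above; the import path is assumed to be the prototype datapoints namespace and may differ across torchvision versions:

import torch
from torchvision.prototype.datapoints import Mask  # assumed location of the class above

# Wrap a raw segmentation tensor; Mask is a Tensor subclass, so the data is
# kept as-is and only mask-aware dispatch is added on top.
seg = Mask(torch.zeros(1, 32, 32, dtype=torch.uint8))
print(seg.spatial_size)          # (32, 32), taken from the last two dimensions
flipped = seg.horizontal_flip()  # routed to the mask-specific kernel
print(type(flipped))             # still a Mask, thanks to wrap_like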
from . import _extension
from .api import CheckpointException
from .default_planner import DefaultLoadPlanner, DefaultSavePlanner
from .filesystem import FileSystemReader, FileSystemWriter
from .hf_storage import HuggingFaceStorageReader, HuggingFaceStorageWriter
from .metadata import (
BytesStorageMetadata,
ChunkStorageMetadata,
Metadata,
TensorStorageMetadata,
)
from .optimizer import load_sharded_optimizer_state_dict
from .planner import LoadPlan, LoadPlanner, ReadItem, SavePlan, SavePlanner, WriteItem
from .state_dict_loader import load, load_state_dict
from .state_dict_saver import async_save, save, save_state_dict
from .storage import StorageReader, StorageWriter
|
from . import _extension
from ._hf_planner import _HuggingFaceLoadPlanner, _HuggingFaceSavePlanner
from .api import CheckpointException
from .default_planner import DefaultLoadPlanner, DefaultSavePlanner
from .filesystem import FileSystemReader, FileSystemWriter
from .hf_storage import HuggingFaceStorageReader, HuggingFaceStorageWriter
from .metadata import (
BytesStorageMetadata,
ChunkStorageMetadata,
Metadata,
TensorStorageMetadata,
)
from .optimizer import load_sharded_optimizer_state_dict
from .planner import LoadPlan, LoadPlanner, ReadItem, SavePlan, SavePlanner, WriteItem
from .state_dict_loader import load, load_state_dict
from .state_dict_saver import async_save, save, save_state_dict
from .storage import StorageReader, StorageWriter
|
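A hedged sketch of how the exports above are typically combined to write and read a checkpoint with the filesystem backend; real training code would run this inside an initialized process group, which is omitted here for brevity:

import torch
import torch.distributed.checkpoint as dcp

state_dict = {"weights": torch.randn(4, 4)}

# Persist the state dict to local disk.
dcp.save(state_dict, storage_writer=dcp.FileSystemWriter("/tmp/ckpt"))

# Restore in place: tensors in state_dict are overwritten with the saved values.
dcp.load(state_dict, storage_reader=dcp.FileSystemReader("/tmp/ckpt"))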
import logging
import re
from github import Github
from pydantic import BaseModel, SecretStr
from pydantic_settings import BaseSettings
class Settings(BaseSettings):
github_repository: str
github_token: SecretStr
deploy_url: str | None = None
commit_sha: str
run_id: int
is_done: bool = False
class LinkData(BaseModel):
previous_link: str
preview_link: str
en_link: str | None = None
def main() -> None:
logging.basicConfig(level=logging.INFO)
settings = Settings()
logging.info(f"Using config: {settings.model_dump_json()}")
g = Github(settings.github_token.get_secret_value())
repo = g.get_repo(settings.github_repository)
use_pr = next(
(pr for pr in repo.get_pulls() if pr.head.sha == settings.commit_sha), None
)
if not use_pr:
logging.error(f"No PR found for hash: {settings.commit_sha}")
return
commits = list(use_pr.get_commits())
current_commit = [c for c in commits if c.sha == settings.commit_sha][0]
run_url = f"https://github.com/{settings.github_repository}/actions/runs/{settings.run_id}"
if settings.is_done and not settings.deploy_url:
current_commit.create_status(
state="success",
description="No Docs Changes",
context="deploy-docs",
target_url=run_url,
)
logging.info("No docs changes found")
return
if not settings.deploy_url:
current_commit.create_status(
state="pending",
description="Deploying Docs",
context="deploy-docs",
target_url=run_url,
)
logging.info("No deploy URL available yet")
return
current_commit.create_status(
state="success",
description="Docs Deployed",
context="deploy-docs",
target_url=run_url,
)
files = list(use_pr.get_files())
docs_files = [f for f in files if f.filename.startswith("docs/")]
deploy_url = settings.deploy_url.rstrip("/")
lang_links: dict[str, list[LinkData]] = {}
for f in docs_files:
match = re.match(r"docs/([^/]+)/docs/(.*)", f.filename)
if not match:
continue
lang = match.group(1)
path = match.group(2)
if path.endswith("index.md"):
path = path.replace("index.md", "")
else:
path = path.replace(".md", "/")
en_path = path
if lang == "en":
use_path = en_path
else:
use_path = f"{lang}/{path}"
link = LinkData(
previous_link=f"https://fastapi.tiangolo.com/{use_path}",
preview_link=f"{deploy_url}/{use_path}",
)
if lang != "en":
link.en_link = f"https://fastapi.tiangolo.com/{en_path}"
lang_links.setdefault(lang, []).append(link)
links: list[LinkData] = []
en_links = lang_links.get("en", [])
en_links.sort(key=lambda x: x.preview_link)
links.extend(en_links)
langs = list(lang_links.keys())
langs.sort()
for lang in langs:
if lang == "en":
continue
current_lang_links = lang_links[lang]
current_lang_links.sort(key=lambda x: x.preview_link)
links.extend(current_lang_links)
message = f"📝 Docs preview for commit {settings.commit_sha} at: {deploy_url}"
if links:
message += "\n\n### Modified Pages\n\n"
for link in links:
message += f"* {link.preview_link}"
message += f" - ([before]({link.previous_link}))"
if link.en_link:
message += f" - ([English]({link.en_link}))"
message += "\n"
print(message)
use_pr.as_issue().create_comment(message)
logging.info("Finished")
if __name__ == "__main__":
main()
|
import logging
import re
from github import Github
from pydantic import SecretStr
from pydantic_settings import BaseSettings
class Settings(BaseSettings):
github_repository: str
github_token: SecretStr
deploy_url: str | None = None
commit_sha: str
run_id: int
is_done: bool = False
def main():
logging.basicConfig(level=logging.INFO)
settings = Settings()
logging.info(f"Using config: {settings.model_dump_json()}")
g = Github(settings.github_token.get_secret_value())
repo = g.get_repo(settings.github_repository)
use_pr = next(
(pr for pr in repo.get_pulls() if pr.head.sha == settings.commit_sha), None
)
if not use_pr:
logging.error(f"No PR found for hash: {settings.commit_sha}")
return
commits = list(use_pr.get_commits())
current_commit = [c for c in commits if c.sha == settings.commit_sha][0]
run_url = f"https://github.com/{settings.github_repository}/actions/runs/{settings.run_id}"
if settings.is_done and not settings.deploy_url:
current_commit.create_status(
state="success",
description="No Docs Changes",
context="deploy-docs",
target_url=run_url,
)
logging.info("No docs changes found")
return
if not settings.deploy_url:
current_commit.create_status(
state="pending",
description="Deploying Docs",
context="deploy-docs",
target_url=run_url,
)
logging.info("No deploy URL available yet")
return
current_commit.create_status(
state="success",
description="Docs Deployed",
context="deploy-docs",
target_url=run_url,
)
files = list(use_pr.get_files())
docs_files = [f for f in files if f.filename.startswith("docs/")]
deploy_url = settings.deploy_url.rstrip("/")
lang_links: dict[str, list[str]] = {}
for f in docs_files:
match = re.match(r"docs/([^/]+)/docs/(.*)", f.filename)
if not match:
continue
lang = match.group(1)
path = match.group(2)
if path.endswith("index.md"):
path = path.replace("index.md", "")
else:
path = path.replace(".md", "/")
if lang == "en":
link = f"{deploy_url}/{path}"
else:
link = f"{deploy_url}/{lang}/{path}"
lang_links.setdefault(lang, []).append(link)
links: list[str] = []
en_links = lang_links.get("en", [])
en_links.sort()
links.extend(en_links)
langs = list(lang_links.keys())
langs.sort()
for lang in langs:
if lang == "en":
continue
current_lang_links = lang_links[lang]
current_lang_links.sort()
links.extend(current_lang_links)
message = f"📝 Docs preview for commit {settings.commit_sha} at: {deploy_url}"
if links:
message += "\n\n### Modified Pages\n\n"
message += "\n".join([f"* {link}" for link in links])
print(message)
use_pr.as_issue().create_comment(message)
logging.info("Finished")
if __name__ == "__main__":
main()
|
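Both versions of the script share the same filename-to-URL mapping; a standalone sketch of that mapping follows (deploy_url is a hypothetical preview host used only for illustration):

import re

deploy_url = "https://deploy-preview.example.com"  # hypothetical preview host

def preview_link(filename: str) -> str | None:
    # docs/<lang>/docs/<page>.md -> <deploy_url>/[<lang>/]<page>/
    match = re.match(r"docs/([^/]+)/docs/(.*)", filename)
    if not match:
        return None
    lang, path = match.group(1), match.group(2)
    path = path.replace("index.md", "") if path.endswith("index.md") else path.replace(".md", "/")
    return f"{deploy_url}/{path}" if lang == "en" else f"{deploy_url}/{lang}/{path}"

assert preview_link("docs/en/docs/tutorial/index.md") == f"{deploy_url}/tutorial/"
assert preview_link("docs/de/docs/features.md") == f"{deploy_url}/de/features/"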
import inspect
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import STFTInitializer
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Identity,
Ones,
STFTInitializer,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
RandomNormal,
TruncatedNormal,
RandomUniform,
VarianceScaling,
OrthogonalInitializer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# Aliases
ALL_OBJECTS_DICT.update(
{
"uniform": RandomUniform,
"normal": RandomNormal,
"orthogonal": OrthogonalInitializer,
"Orthogonal": OrthogonalInitializer, # Legacy
"one": Ones,
"zero": Zeros,
}
)
@keras_export("keras.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
The `identifier` may be the string name of an initializer function or class
(case-sensitive).
>>> identifier = 'Ones'
>>> keras.initializers.deserialize(identifier)
<...keras.initializers.initializers.Ones...>
You can also specify the `config` of the initializer to this function by passing a
dict containing `class_name` and `config` as an identifier. Also note that
the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras.initializers.deserialize(cfg)
<...keras.initializers.initializers.Ones...>
If the `identifier` is a class, this method will return a new
instance of the class created via its constructor.
Args:
identifier: String or dict that contains the initializer name or
configuration.
Returns:
Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
|
import inspect
from keras.src.api_export import keras_export
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
ALL_OBJECTS = {
Initializer,
Constant,
Identity,
Ones,
Zeros,
GlorotNormal,
GlorotUniform,
HeNormal,
HeUniform,
LecunNormal,
LecunUniform,
RandomNormal,
TruncatedNormal,
RandomUniform,
VarianceScaling,
OrthogonalInitializer,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
{to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
# Aliases
ALL_OBJECTS_DICT.update(
{
"uniform": RandomUniform,
"normal": RandomNormal,
"orthogonal": OrthogonalInitializer,
"Orthogonal": OrthogonalInitializer, # Legacy
"one": Ones,
"zero": Zeros,
}
)
@keras_export("keras.initializers.serialize")
def serialize(initializer):
"""Returns the initializer configuration as a Python dict."""
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras initializer object via its configuration."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieves a Keras initializer object via an identifier.
The `identifier` may be the string name of an initializer function or class
(case-sensitive).
>>> identifier = 'Ones'
>>> keras.initializers.deserialize(identifier)
<...keras.initializers.initializers.Ones...>
You can also specify the `config` of the initializer to this function by passing a
dict containing `class_name` and `config` as an identifier. Also note that
the `class_name` must map to an `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> keras.initializers.deserialize(cfg)
<...keras.initializers.initializers.Ones...>
If the `identifier` is a class, this method will return a new
instance of the class created via its constructor.
Args:
identifier: String or dict that contains the initializer name or
configuration.
Returns:
Initializer instance based on the input identifier.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
obj = deserialize(config)
else:
obj = identifier
if callable(obj):
if inspect.isclass(obj):
obj = obj()
return obj
else:
raise ValueError(
f"Could not interpret initializer identifier: {identifier}"
)
|
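A hedged sketch of the identifier forms that keras.initializers.get accepts, mirroring its docstring (assumes a working Keras installation):

from keras import initializers

init_a = initializers.get("he_normal")                            # snake_case alias
init_b = initializers.get("HeNormal")                             # class name
init_c = initializers.get({"class_name": "Ones", "config": {}})   # config dict
assert initializers.get(None) is None                             # None passes through

# Initializer objects are callable and produce tensors of the requested shape.
values = init_a(shape=(3, 3))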
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import version
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras import backend
from keras._tf_keras.keras import layers
from keras._tf_keras.keras import losses
from keras._tf_keras.keras import metrics
from keras._tf_keras.keras import preprocessing
|
import copy
import warnings
from dataclasses import InitVar, dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file into a folder alongside the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside the progress bar while downloading the files.
disable_tqdm (`bool`, defaults to `False`):
Whether to disable the individual file download progress bars.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
ignore_url_params: bool = False
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
disable_tqdm: bool = False
def __post_init__(self, use_auth_token):
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
self.token = use_auth_token
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
elif self.storage_options["hf"].get("token") is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
|
import copy
import warnings
from dataclasses import InitVar, dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional, Union
from .. import config
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file into a folder alongside the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and override the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
A description to be displayed alongside the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
ignore_url_params: bool = False
storage_options: Dict[str, Any] = field(default_factory=dict)
download_desc: Optional[str] = None
def __post_init__(self, use_auth_token):
if use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={use_auth_token}' instead.",
FutureWarning,
)
self.token = use_auth_token
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def __setattr__(self, name, value):
if name == "token" and getattr(self, "storage_options", None) is not None:
if "hf" not in self.storage_options:
self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
elif self.storage_options["hf"].get("token") is None:
self.storage_options["hf"]["token"] = value
super().__setattr__(name, value)
|
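A hedged usage sketch of DownloadConfig showing how the token is propagated into the "hf" storage options (the token value is a placeholder, not a real secret):

from datasets import DownloadConfig

cfg = DownloadConfig(cache_dir="/tmp/hf_cache", max_retries=3, token="hf_xxx")
# __post_init__ seeds storage_options["hf"] with the token and endpoint.
print(cfg.storage_options["hf"]["token"])  # "hf_xxx"

copied = cfg.copy()  # deep-copies every field into a new config
assert copied.max_retries == 3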