| input (string, 33 to 5k chars) | output (string, 32 to 5k chars) |
|---|---|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch.nn.functional as F
from torch import Tensor
from sentence_transformers.models.Module import Module
class Normalize(Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super().__init__()
def forward(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
return
@classmethod
def load(cls, *args, **kwargs) -> Self:
return cls()
|
from __future__ import annotations
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super().__init__()
def forward(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path) -> None:
pass
@staticmethod
def load(input_path) -> Normalize:
return Normalize()
|
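A minimal usage sketch of the layer shown above (assuming the plain nn.Module-based Normalize from the second cell is in scope): every row of the "sentence_embedding" tensor is rescaled to unit L2 norm.

import torch

layer = Normalize()  # the nn.Module-based variant defined above
out = layer({"sentence_embedding": torch.randn(4, 8)})
print(out["sentence_embedding"].norm(p=2, dim=1))  # each row now has norm ~1.0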
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_doc.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
_proto_type_name: Optional[str] = None
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
def _docarray_to_json_compatible(self):
"""
Convert itself into a json compatible object
:return: a representation of the tensor compatible with orjson
"""
return self
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
_proto_type_name: Optional[str] = None
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
def _docarray_to_json_compatible(self):
"""
Convert itself into a json compatible object
:return: a representation of the tensor compatible with orjson
"""
return self
|
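For orientation, a standalone sketch of the pydantic-v1 custom-type protocol that AbstractType builds on: __get_validators__ yields the callables pydantic runs whenever a field of that type is parsed. The UpperStr toy type is hypothetical and not part of docarray.

try:  # pydantic v2 ships the v1 API under pydantic.v1
    from pydantic.v1 import BaseModel
except ImportError:
    from pydantic import BaseModel


class UpperStr(str):
    @classmethod
    def __get_validators__(cls):
        # pydantic calls each yielded validator in order during parsing
        yield cls.validate

    @classmethod
    def validate(cls, value):
        return cls(str(value).upper())


class MyModel(BaseModel):
    name: UpperStr


print(MyModel(name="hello").name)  # "HELLO"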
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Gateway YAML parser given the syntax version number
"""
from jina.logging.predefined import default_logger
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
for key in {
'name',
'port',
'protocol',
'host',
'tracing',
'graph_description',
'graph_conditions',
'deployments_addresses',
'deployments_metadata',
'deployments_no_reduce',
'timeout_send',
'retries',
'compression',
'runtime_name',
'prefetch',
'meter',
'log_config',
}:
if runtime_args and not runtime_args.get(key) and data.get(key):
runtime_args[key] = data.get(key)
if runtime_args.get('default_port'):
yaml_port = data.get('port')
if isinstance(yaml_port, int):
yaml_port = [yaml_port]
runtime_args['port'] = yaml_port or runtime_args.get('port')
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
req_handler_cls=GatewayRequestHandler
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
:return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
|
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.gateway import BaseGateway
class GatewayLegacyParser(BaseLegacyParser):
"""Legacy parser for gateway."""
def parse(
self,
cls: Type['BaseGateway'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseGateway':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: gateway yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Gateway YAML parser given the syntax version number
"""
from jina.logging.predefined import default_logger
data['metas'] = {}
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
for key in {
'name',
'port',
'protocol',
'host',
'tracing',
'graph_description',
'graph_conditions',
'deployments_addresses',
'deployments_metadata',
'deployments_no_reduce',
'timeout_send',
'retries',
'compression',
'runtime_name',
'prefetch',
'meter',
'log_config',
}:
if runtime_args and not runtime_args.get(key) and data.get(key):
runtime_args[key] = data.get(key)
if runtime_args.get('default_port'):
yaml_port = data.get('port')
if isinstance(yaml_port, int):
yaml_port = [yaml_port]
runtime_args['port'] = yaml_port or runtime_args.get('port')
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
obj.is_updated = False
return obj
def dump(self, data: 'BaseGateway') -> Dict:
"""
:param data: versioned gateway object
:return: the dictionary given a versioned gateway object
"""
a = {k: v for k, v in data._init_kwargs_dict.items()}
r = {}
if a:
r['with'] = a
return r
|
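A standalone sketch of the key-forwarding step in parse() above: values from the gateway YAML dict are copied into runtime_args only for keys the runtime did not already set. The dict contents here are made up for illustration.

yaml_data = {"port": 12345, "protocol": "grpc", "with": {}}  # hypothetical YAML content
runtime_args = {"protocol": "http"}                          # hypothetical pre-set runtime args

for key in {"name", "port", "protocol", "host"}:
    if runtime_args and not runtime_args.get(key) and yaml_data.get(key):
        runtime_args[key] = yaml_data.get(key)

print(runtime_args)  # {'protocol': 'http', 'port': 12345}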
from importlib import metadata
from langchain_core._api import warn_deprecated
## Create namespaces for pydantic v1 and v2.
# This code must stay at the top of the file before other modules may
# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
#
# This hack is done for the following reasons:
# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
# both dependencies and dependents may be stuck on either version of v1 or v2.
# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
# unambiguously uses either v1 or v2 API.
# * This change is easier to roll out and roll back.
from pydantic.v1 import * # noqa: F403
try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
from importlib import metadata
from langchain_core._api import warn_deprecated
## Create namespaces for pydantic v1 and v2.
# This code must stay at the top of the file before other modules may
# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
#
# This hack is done for the following reasons:
# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
# both dependencies and dependents may be stuck on either version of v1 or v2.
# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
# unambiguously uses either v1 or v2 API.
# * This change is easier to roll out and roll back.
try:
from pydantic.v1 import * # noqa: F403
except ImportError:
from pydantic import * # type: ignore # noqa: F403
try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
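The version probe used in both variants, shown on its own: the installed pydantic major version is read from package metadata, defaulting to 0 when pydantic is not installed.

from importlib import metadata

try:
    pydantic_major = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
    pydantic_major = 0

print(pydantic_major)  # e.g. 2 for pydantic 2.x, 0 if pydantic is absent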
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseRerankingEvaluator import (
SparseRerankingEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseRerankingEvaluator import (
SparseRerankingEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
]
# TODO: Ask Tom whether these are important: LabelAccuracyEvaluator, ParaphraseMiningEvaluator
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torchaudio
@torchaudio._extension.fail_if_no_sox
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): seed value. valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
torchaudio.lib._torchaudio_sox.set_seed(seed)
@torchaudio._extension.fail_if_no_sox
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
torchaudio.lib._torchaudio_sox.set_verbosity(verbosity)
@torchaudio._extension.fail_if_no_sox
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
torchaudio.lib._torchaudio_sox.set_buffer_size(buffer_size)
@torchaudio._extension.fail_if_no_sox
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
torchaudio.lib._torchaudio_sox.set_use_threads(use_threads)
@torchaudio._extension.fail_if_no_sox
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(torchaudio.lib._torchaudio_sox.list_effects())
@torchaudio._extension.fail_if_no_sox
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return torchaudio.lib._torchaudio_sox.list_read_formats()
@torchaudio._extension.fail_if_no_sox
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return torchaudio.lib._torchaudio_sox.list_write_formats()
@torchaudio._extension.fail_if_no_sox
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return torchaudio.lib._torchaudio_sox.get_buffer_size()
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torch
import torchaudio
@torchaudio._extension.fail_if_no_sox
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
seed (int): seed value. valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_seed(seed)
@torchaudio._extension.fail_if_no_sox
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_verbosity(verbosity)
@torchaudio._extension.fail_if_no_sox
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_buffer_size(buffer_size)
@torchaudio._extension.fail_if_no_sox
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
torch.ops.torchaudio.sox_utils_set_use_threads(use_threads)
@torchaudio._extension.fail_if_no_sox
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(torch.ops.torchaudio.sox_utils_list_effects())
@torchaudio._extension.fail_if_no_sox
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_read_formats()
@torchaudio._extension.fail_if_no_sox
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return torch.ops.torchaudio.sox_utils_list_write_formats()
@torchaudio._extension.fail_if_no_sox
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return torch.ops.torchaudio.sox_utils_get_buffer_size()
|
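A minimal usage sketch, assuming this module is exposed as torchaudio.utils.sox_utils and that torchaudio was built with libsox support (otherwise the fail_if_no_sox guard raises).

from torchaudio.utils import sox_utils

sox_utils.set_verbosity(1)            # only failure messages from libsox
sox_utils.set_buffer_size(8192)       # processing buffers of 8192 bytes
print(sox_utils.get_buffer_size())    # 8192
print(len(sox_utils.list_effects()))  # number of available sox effects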
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import torch
class TorchFormatter(Formatter[Mapping, "torch.Tensor", Mapping]):
def __init__(self, features=None, **torch_tensor_kwargs):
super().__init__(features=features)
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import torch
class TorchFormatter(Formatter[dict, "torch.Tensor", dict]):
def __init__(self, features=None, decoded=True, **torch_tensor_kwargs):
super().__init__(features=features, decoded=decoded)
self.torch_tensor_kwargs = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _consolidate(self, column):
import torch
if isinstance(column, list) and column:
if all(
isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return torch.stack(column)
return column
def _tensorize(self, value):
import torch
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": torch.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> dict:
row = self.numpy_arrow_extractor().extract_row(pa_table)
if self.decoded:
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
if self.decoded:
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
if self.decoded:
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
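A standalone sketch of the _consolidate rule above: a list column is stacked into a single tensor only when every element is a tensor with the same shape and dtype; otherwise the list is returned unchanged.

import torch


def consolidate(column):
    if isinstance(column, list) and column:
        first = column[0]
        if all(
            isinstance(x, torch.Tensor) and x.shape == first.shape and x.dtype == first.dtype
            for x in column
        ):
            return torch.stack(column)
    return column


print(consolidate([torch.zeros(2), torch.ones(2)]).shape)  # torch.Size([2, 2])
print(type(consolidate([torch.zeros(2), torch.ones(3)])))  # <class 'list'>: ragged shapes stay a list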
from typing import TYPE_CHECKING
import paddle
if TYPE_CHECKING: # pragma: no cover
from paddle import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:param eps: a small jitter to avoid divide by zero
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
a_n, b_n = x_mat.norm(axis=1)[:, None], y_mat.norm(axis=1)[:, None]
a_norm = x_mat / paddle.clip(a_n, min=eps)
b_norm = y_mat / paddle.clip(b_n, min=eps)
sim_mt = 1 - paddle.mm(a_norm, b_norm.transpose(perm=[1, 0]))
return sim_mt.numpy()
def sqeuclidean(
x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: paddle array with ndim=2
:param y_mat: paddle array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
return (
paddle.sum(y_mat**2, axis=1)
+ paddle.sum(x_mat**2, axis=1)[:, None]
- 2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))
).numpy()
def euclidean(x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu') -> 'numpy.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: paddle array with ndim=2
:param y_mat: paddle array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
return paddle.sqrt(
paddle.sum(y_mat**2, axis=1)
+ paddle.sum(x_mat**2, axis=1)[:, None]
- 2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))
).numpy()
|
from typing import TYPE_CHECKING
import paddle
if TYPE_CHECKING:
from paddle import tensor
import numpy
def cosine(
x_mat: 'tensor', y_mat: 'tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:param eps: a small jitter to avoid divide by zero
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
a_n, b_n = x_mat.norm(axis=1)[:, None], y_mat.norm(axis=1)[:, None]
a_norm = x_mat / paddle.clip(a_n, min=eps)
b_norm = y_mat / paddle.clip(b_n, min=eps)
sim_mt = 1 - paddle.mm(a_norm, b_norm.transpose(perm=[1, 0]))
return sim_mt.numpy()
def sqeuclidean(
x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: paddle array with ndim=2
:param y_mat: paddle array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
return (
paddle.sum(y_mat**2, axis=1)
+ paddle.sum(x_mat**2, axis=1)[:, None]
- 2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))
).numpy()
def euclidean(x_mat: 'tensor', y_mat: 'tensor', device: str = 'cpu') -> 'numpy.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: paddle array with ndim=2
:param y_mat: paddle array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
paddle.set_device(device)
return paddle.sqrt(
paddle.sum(y_mat**2, axis=1)
+ paddle.sum(x_mat**2, axis=1)[:, None]
- 2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))
).numpy()
|
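A NumPy cross-check (independent of paddle) of the expansion used by sqeuclidean and euclidean above: ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y, evaluated for every row pair.

import numpy as np

x = np.random.rand(3, 4)
y = np.random.rand(5, 4)

expanded = (y ** 2).sum(axis=1) + (x ** 2).sum(axis=1)[:, None] - 2 * x @ y.T
reference = ((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=-1)
print(np.allclose(expanded, reference))  # True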
from typing import Final
from dask.array import * # noqa: F403
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
__array_api_version__: Final = "2024.12"
# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')
__import__(__package__ + '.fft')
|
from dask.array import * # noqa: F403
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
__array_api_version__ = '2024.12'
__import__(__package__ + '.linalg')
__import__(__package__ + '.fft')
|
"""Module for async requests generator."""
from typing import AsyncIterator, Optional, Dict, TYPE_CHECKING
from jina.clients.request.helper import _new_data_request_from_batch, _new_data_request
from jina.enums import DataInputType
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
from jina.types.request import Request
if TYPE_CHECKING: # pragma: no cover
from jina.clients.request import GeneratorSourceType
async def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> AsyncIterator['Request']:
"""An async :function:`request_generator`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: the data to use in the request
:param request_size: the number of Documents per request
:param data_type: if ``data`` is an iterator over self-contained document, i.e. :class:`DocumentSourceType`;
or an iterator over possible Document content (set to text, blob and buffer).
:param parameters: the kwargs that will be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
with ImportExtensions(required=True):
import aiostream
async for batch in aiostream.stream.chunks(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
# must be handled here, as the grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
"""Module for async requests generator."""
from typing import AsyncIterator, Optional, Dict, TYPE_CHECKING
from jina.clients.request.helper import _new_data_request_from_batch, _new_data_request
from jina.enums import DataInputType
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
from jina.types.request import Request
if TYPE_CHECKING:
from jina.clients.request import GeneratorSourceType
async def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> AsyncIterator['Request']:
"""An async :function:`request_generator`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: the data to use in the request
:param request_size: the number of Documents per request
:param data_type: if ``data`` is an iterator over self-contained document, i.e. :class:`DocumentSourceType`;
or an iterator over possible Document content (set to text, blob and buffer).
:param parameters: the kwargs that will be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
with ImportExtensions(required=True):
import aiostream
async for batch in aiostream.stream.chunks(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
# must be handled here, as the grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
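A dependency-free sketch of the batching idea that aiostream.stream.chunks provides above: documents from an async source are grouped into request_size-sized batches, with a final partial batch. The chunks helper here is hypothetical and only illustrates the flow.

import asyncio


async def chunks(source, size):
    # group items from an async iterator into lists of at most `size` items
    batch = []
    async for item in source:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch


async def main():
    async def docs():
        for i in range(5):
            yield i

    async for batch in chunks(docs(), 2):
        print(batch)  # [0, 1] then [2, 3] then [4]


asyncio.run(main())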
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from .extmath import stable_cumsum
def _weighted_percentile(array, sample_weight, percentile=50):
"""Compute weighted percentile
Computes lower weighted percentile. If `array` is a 2D array, the
`percentile` is computed along the axis 0.
.. versionchanged:: 0.24
Accepts 2D `array`.
Parameters
----------
array : 1D or 2D array
Values to take the weighted percentile of.
sample_weight: 1D or 2D array
Weights for each value in `array`. Must be same shape as `array` or
of shape `(array.shape[0],)`.
percentile: int or float, default=50
Percentile to compute. Must be value between 0 and 100.
Returns
-------
percentile : int if `array` 1D, ndarray if `array` 2D
Weighted percentile.
"""
n_dim = array.ndim
if n_dim == 0:
return array[()]
if array.ndim == 1:
array = array.reshape((-1, 1))
# When sample_weight 1D, repeat for each array.shape[1]
if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
sorted_idx = np.argsort(array, axis=0)
sorted_weights = np.take_along_axis(sample_weight, sorted_idx, axis=0)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(sorted_weights, axis=0)
adjusted_percentile = percentile / 100 * weight_cdf[-1]
# For percentile=0, ignore leading observations with sample_weight=0. GH20528
mask = adjusted_percentile == 0
adjusted_percentile[mask] = np.nextafter(
adjusted_percentile[mask], adjusted_percentile[mask] + 1
)
percentile_idx = np.array(
[
np.searchsorted(weight_cdf[:, i], adjusted_percentile[i])
for i in range(weight_cdf.shape[1])
]
)
percentile_idx = np.array(percentile_idx)
# In rare cases, percentile_idx equals to sorted_idx.shape[0]
max_idx = sorted_idx.shape[0] - 1
percentile_idx = np.apply_along_axis(
lambda x: np.clip(x, 0, max_idx), axis=0, arr=percentile_idx
)
col_index = np.arange(array.shape[1])
percentile_in_sorted = sorted_idx[percentile_idx, col_index]
percentile = array[percentile_in_sorted, col_index]
return percentile[0] if n_dim == 1 else percentile
# TODO: refactor to do the symmetrisation inside _weighted_percentile to avoid
# sorting the input array twice.
def _averaged_weighted_percentile(array, sample_weight, percentile=50):
return (
_weighted_percentile(array, sample_weight, percentile)
- _weighted_percentile(-array, sample_weight, 100 - percentile)
) / 2
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from .extmath import stable_cumsum
def _weighted_percentile(array, sample_weight, percentile=50):
"""Compute weighted percentile
Computes lower weighted percentile. If `array` is a 2D array, the
`percentile` is computed along the axis 0.
.. versionchanged:: 0.24
Accepts 2D `array`.
Parameters
----------
array : 1D or 2D array
Values to take the weighted percentile of.
sample_weight: 1D or 2D array
Weights for each value in `array`. Must be same shape as `array` or
of shape `(array.shape[0],)`.
percentile: int or float, default=50
Percentile to compute. Must be value between 0 and 100.
Returns
-------
percentile : int if `array` 1D, ndarray if `array` 2D
Weighted percentile.
"""
n_dim = array.ndim
if n_dim == 0:
return array[()]
if array.ndim == 1:
array = array.reshape((-1, 1))
# When sample_weight 1D, repeat for each array.shape[1]
if array.shape != sample_weight.shape and array.shape[0] == sample_weight.shape[0]:
sample_weight = np.tile(sample_weight, (array.shape[1], 1)).T
sorted_idx = np.argsort(array, axis=0)
sorted_weights = np.take_along_axis(sample_weight, sorted_idx, axis=0)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(sorted_weights, axis=0)
adjusted_percentile = percentile / 100 * weight_cdf[-1]
# For percentile=0, ignore leading observations with sample_weight=0. GH20528
mask = adjusted_percentile == 0
adjusted_percentile[mask] = np.nextafter(
adjusted_percentile[mask], adjusted_percentile[mask] + 1
)
percentile_idx = np.array(
[
np.searchsorted(weight_cdf[:, i], adjusted_percentile[i])
for i in range(weight_cdf.shape[1])
]
)
percentile_idx = np.array(percentile_idx)
# In rare cases, percentile_idx equals to sorted_idx.shape[0]
max_idx = sorted_idx.shape[0] - 1
percentile_idx = np.apply_along_axis(
lambda x: np.clip(x, 0, max_idx), axis=0, arr=percentile_idx
)
col_index = np.arange(array.shape[1])
percentile_in_sorted = sorted_idx[percentile_idx, col_index]
percentile = array[percentile_in_sorted, col_index]
return percentile[0] if n_dim == 1 else percentile
|
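A tiny standalone illustration of the lower weighted percentile computed above: sort the values, accumulate the weights, and pick the first value whose cumulative weight reaches percentile / 100 of the total weight.

import numpy as np

values = np.array([1.0, 3.0, 2.0, 4.0])
weights = np.array([1.0, 1.0, 3.0, 1.0])

order = np.argsort(values)
cdf = np.cumsum(weights[order])  # cumulative weight in sorted order
target = 50 / 100 * cdf[-1]      # percentile=50
idx = np.searchsorted(cdf, target)
print(values[order][idx])        # 2.0, the weighted median of this sample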
"""
LexRank implementation
Source: https://github.com/crabcamp/lexrank/tree/dev
"""
import logging
import numpy as np
from scipy.sparse.csgraph import connected_components
from scipy.special import softmax
logger = logging.getLogger(__name__)
def degree_centrality_scores(
similarity_matrix,
threshold=None,
increase_power=True,
):
if not (threshold is None or isinstance(threshold, float) and 0 <= threshold < 1):
raise ValueError(
"'threshold' should be a floating-point number " "from the interval [0, 1) or None",
)
if threshold is None:
markov_matrix = create_markov_matrix(similarity_matrix)
else:
markov_matrix = create_markov_matrix_discrete(
similarity_matrix,
threshold,
)
scores = stationary_distribution(
markov_matrix,
increase_power=increase_power,
normalized=False,
)
return scores
def _power_method(transition_matrix, increase_power=True, max_iter=10000):
eigenvector = np.ones(len(transition_matrix))
if len(eigenvector) == 1:
return eigenvector
transition = transition_matrix.transpose()
for _ in range(max_iter):
eigenvector_next = np.dot(transition, eigenvector)
if np.allclose(eigenvector_next, eigenvector):
return eigenvector_next
eigenvector = eigenvector_next
if increase_power:
transition = np.dot(transition, transition)
logger.warning("Maximum number of iterations for power method exceeded without convergence!")
return eigenvector_next
def connected_nodes(matrix):
_, labels = connected_components(matrix)
groups = []
for tag in np.unique(labels):
group = np.where(labels == tag)[0]
groups.append(group)
return groups
def create_markov_matrix(weights_matrix):
n_1, n_2 = weights_matrix.shape
if n_1 != n_2:
raise ValueError("'weights_matrix' should be square")
row_sum = weights_matrix.sum(axis=1, keepdims=True)
# normalize probability distribution differently if we have negative transition values
if np.min(weights_matrix) <= 0:
return softmax(weights_matrix, axis=1)
return weights_matrix / row_sum
def create_markov_matrix_discrete(weights_matrix, threshold):
discrete_weights_matrix = np.zeros(weights_matrix.shape)
ixs = np.where(weights_matrix >= threshold)
discrete_weights_matrix[ixs] = 1
return create_markov_matrix(discrete_weights_matrix)
def stationary_distribution(
transition_matrix,
increase_power=True,
normalized=True,
):
n_1, n_2 = transition_matrix.shape
if n_1 != n_2:
raise ValueError("'transition_matrix' should be square")
distribution = np.zeros(n_1)
grouped_indices = connected_nodes(transition_matrix)
for group in grouped_indices:
t_matrix = transition_matrix[np.ix_(group, group)]
eigenvector = _power_method(t_matrix, increase_power=increase_power)
distribution[group] = eigenvector
if normalized:
distribution /= n_1
return distribution
|
"""
LexRank implementation
Source: https://github.com/crabcamp/lexrank/tree/dev
"""
import numpy as np
from scipy.sparse.csgraph import connected_components
from scipy.special import softmax
import logging
logger = logging.getLogger(__name__)
def degree_centrality_scores(
similarity_matrix,
threshold=None,
increase_power=True,
):
if not (threshold is None or isinstance(threshold, float) and 0 <= threshold < 1):
raise ValueError(
"'threshold' should be a floating-point number " "from the interval [0, 1) or None",
)
if threshold is None:
markov_matrix = create_markov_matrix(similarity_matrix)
else:
markov_matrix = create_markov_matrix_discrete(
similarity_matrix,
threshold,
)
scores = stationary_distribution(
markov_matrix,
increase_power=increase_power,
normalized=False,
)
return scores
def _power_method(transition_matrix, increase_power=True, max_iter=10000):
eigenvector = np.ones(len(transition_matrix))
if len(eigenvector) == 1:
return eigenvector
transition = transition_matrix.transpose()
for _ in range(max_iter):
eigenvector_next = np.dot(transition, eigenvector)
if np.allclose(eigenvector_next, eigenvector):
return eigenvector_next
eigenvector = eigenvector_next
if increase_power:
transition = np.dot(transition, transition)
logger.warning("Maximum number of iterations for power method exceeded without convergence!")
return eigenvector_next
def connected_nodes(matrix):
_, labels = connected_components(matrix)
groups = []
for tag in np.unique(labels):
group = np.where(labels == tag)[0]
groups.append(group)
return groups
def create_markov_matrix(weights_matrix):
n_1, n_2 = weights_matrix.shape
if n_1 != n_2:
raise ValueError("'weights_matrix' should be square")
row_sum = weights_matrix.sum(axis=1, keepdims=True)
# normalize probability distribution differently if we have negative transition values
if np.min(weights_matrix) <= 0:
return softmax(weights_matrix, axis=1)
return weights_matrix / row_sum
def create_markov_matrix_discrete(weights_matrix, threshold):
discrete_weights_matrix = np.zeros(weights_matrix.shape)
ixs = np.where(weights_matrix >= threshold)
discrete_weights_matrix[ixs] = 1
return create_markov_matrix(discrete_weights_matrix)
def stationary_distribution(
transition_matrix,
increase_power=True,
normalized=True,
):
n_1, n_2 = transition_matrix.shape
if n_1 != n_2:
raise ValueError("'transition_matrix' should be square")
distribution = np.zeros(n_1)
grouped_indices = connected_nodes(transition_matrix)
for group in grouped_indices:
t_matrix = transition_matrix[np.ix_(group, group)]
eigenvector = _power_method(t_matrix, increase_power=increase_power)
distribution[group] = eigenvector
if normalized:
distribution /= n_1
return distribution
|
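A minimal usage sketch, assuming the functions above are in scope: score three "sentences" by degree centrality of a toy similarity matrix.

import numpy as np

similarity = np.array(
    [
        [1.0, 0.8, 0.1],
        [0.8, 1.0, 0.2],
        [0.1, 0.2, 1.0],
    ]
)
scores = degree_centrality_scores(similarity, threshold=None)
print(scores)  # rows 0 and 1 (mutually similar) should score higher than row 2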
"""Simple Reader that reads abstract of primary citation for a given PDB id."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.pdb.utils import get_pdb_abstract
class PdbAbstractReader(BaseReader):
"""Protein Data Bank entries' primary citation abstract reader."""
def __init__(self) -> None:
super().__init__()
def load_data(self, pdb_ids: List[str]) -> List[Document]:
"""
Load data from RCSB or EBI REST API.
Args:
pdb_ids (List[str]): List of PDB ids \
for which primary citation abstracts are to be read.
"""
results = []
for pdb_id in pdb_ids:
title, abstracts = get_pdb_abstract(pdb_id)
primary_citation = abstracts[title]
abstract = primary_citation["abstract"]
abstract_text = "\n".join(
["\n".join([str(k), str(v)]) for k, v in abstract.items()]
)
results.append(
Document(
text=abstract_text,
extra_info={"pdb_id": pdb_id, "primary_citation": primary_citation},
)
)
return results
|
"""Simple Reader that reads abstract of primary citation for a given PDB id."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.pdb.utils import get_pdb_abstract
class PdbAbstractReader(BaseReader):
"""Protein Data Bank entries' primary citation abstract reader."""
def __init__(self) -> None:
super().__init__()
def load_data(self, pdb_ids: List[str]) -> List[Document]:
"""Load data from RCSB or EBI REST API.
Args:
pdb_ids (List[str]): List of PDB ids \
for which primary citation abstracts are to be read.
"""
results = []
for pdb_id in pdb_ids:
title, abstracts = get_pdb_abstract(pdb_id)
primary_citation = abstracts[title]
abstract = primary_citation["abstract"]
abstract_text = "\n".join(
["\n".join([str(k), str(v)]) for k, v in abstract.items()]
)
results.append(
Document(
text=abstract_text,
extra_info={"pdb_id": pdb_id, "primary_citation": primary_citation},
)
)
return results
|
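A minimal usage sketch (assumes the llama-index-readers-pdb package is installed and that the RCSB/EBI APIs are reachable; the PDB id is just an example).

from llama_index.readers.pdb import PdbAbstractReader

reader = PdbAbstractReader()
documents = reader.load_data(pdb_ids=["1cbs"])
print(documents[0].extra_info["pdb_id"])  # "1cbs"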
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.12"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
"""FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.115.11"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .exceptions import WebSocketException as WebSocketException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
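For context, a minimal app using a few of the names re-exported above.

from fastapi import FastAPI

app = FastAPI()


@app.get("/")
def read_root() -> dict:
    return {"hello": "world"}

# run with: uvicorn main:app --reload  (assuming this file is saved as main.py)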
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from PIL import Image as PILImage
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
f'Image URL must have one of the following extensions:'
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load_pil(self, timeout: Optional[float] = None) -> 'PILImage.Image':
"""
Load the image from the bytes into a `PIL.Image.Image` instance
---
```python
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import ImageUrl
img_url = "https://upload.wikimedia.org/wikipedia/commons/8/80/Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
img_url = parse_obj_as(ImageUrl, img_url)
img = img_url.load_pil()
from PIL.Image import Image
assert isinstance(img, Image)
```
---
:return: a Pillow image
"""
from docarray.typing.bytes.image_bytes import ImageBytes
return ImageBytes(self.load_bytes(timeout=timeout)).load_pil()
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
f'Image URL must have one of the following extensions:'
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
---
```python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
```
---
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
"""Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the response from an "old-fashioned" LLM that
generates regular text (not chat messages).
This model is used internally by chat models and will eventually
be mapped to a more general `LLMResult` object, and then projected into
an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks). Please refer to the `AIMessage` and `LLMResult` schema documentation
for more information.
"""
text: str
"""Generated text output."""
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"
"""Type is used exclusively for serialization purposes.
Set to "Generation" for this class."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default namespace is ["langchain", "schema", "output"].
"""
return ["langchain", "schema", "output"]
class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""
def __add__(self, other: GenerationChunk) -> GenerationChunk:
"""Concatenate two GenerationChunks."""
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return GenerationChunk(
text=self.text + other.text,
generation_info=generation_info or None,
)
msg = f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
raise TypeError(msg)
|
"""Generation output schema."""
from __future__ import annotations
from typing import Any, Literal, Optional
from langchain_core.load import Serializable
from langchain_core.utils._merge import merge_dicts
class Generation(Serializable):
"""A single text generation output.
Generation represents the response from an "old-fashioned" LLM that
generates regular text (not chat messages).
This model is used internally by chat models and will eventually
be mapped to a more general `LLMResult` object, and then projected into
an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks). Please refer to the `AIMessage` and `LLMResult` schema documentation
for more information.
"""
text: str
"""Generated text output."""
generation_info: Optional[dict[str, Any]] = None
"""Raw response from the provider.
May include things like the reason for finishing or token log probabilities.
"""
type: Literal["Generation"] = "Generation"
"""Type is used exclusively for serialization purposes.
Set to "Generation" for this class."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object.
Default namespace is ["langchain", "schema", "output"].
"""
return ["langchain", "schema", "output"]
class GenerationChunk(Generation):
"""Generation chunk, which can be concatenated with other Generation chunks."""
def __add__(self, other: GenerationChunk) -> GenerationChunk:
"""Concatenate two GenerationChunks."""
if isinstance(other, GenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return GenerationChunk(
text=self.text + other.text,
generation_info=generation_info or None,
)
else:
msg = (
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
raise TypeError(msg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .mot import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
from .trackers import * # noqa: F401,F403
from .tracking_heads import * # noqa: F401,F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .mot import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
from .trackers import * # noqa: F401,F403
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocList[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocList[ImageDoc]([ImageDoc(id=str(i)) for i in range(N_DOCS)])
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocList) -> DocList:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray] = None
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocList[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
docs=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocList[MyImage])
|
from typing import Generator, Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocList[ImageDoc]([ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(docs=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocList[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(docs=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocList) -> DocList:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDoc):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batched(n_docs, batch_size, backend):
da = DocList[MyImage]([MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)])
it = map_docs_batched(
docs=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocList[MyImage])
|
__version__ = '0.12.8'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.12.7'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyTensor, Embedding, PointCloud3DUrl
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
A point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points on the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed-size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an Embedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray import PointCloud3D
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[Embedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument, PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[Embedding]
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyTensor, Embedding, PointCloud3DUrl
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
A point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points on the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed-size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an Embedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray import PointCloud3D
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[Embedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import Document, PointCloud3D, Text
# compose it
class MultiModalDoc(Document):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[Embedding]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(CascadeRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CascadeRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .mask_target import mask_target
from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks
from .utils import encode_mask_results, split_combined_polys
__all__ = [
'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',
'PolygonMasks', 'encode_mask_results'
]
|
from .mask_target import mask_target
from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks
from .utils import encode_mask_results, split_combined_polys
__all__ = [
'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',
'PolygonMasks', 'encode_mask_results'
]
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
import numpy as np
import torch
from ...models import EmbeddingModelWrapper, _ModelCatalogue
@pytest.mark.parametrize(
['model_name', 'is_supported'],
[
('ResNet', False),
('resnet18', True),
('resnet50', True),
('alexnet', True),
('xResNet', False),
('Alexnet', False)
]
)
def test_is_model_supported(model_name: str, is_supported: bool):
assert _ModelCatalogue.is_model_supported(model_name) == is_supported
@pytest.mark.parametrize(
['model_name', 'layer'],
[
('alexnet', 'features'),
('vgg11', 'features'),
('squeezenet1_0', 'features'),
('densenet121', 'features'),
('mnasnet0_5', 'layers'),
('mobilenet_v2', 'features'),
]
)
def test_is_correct_layer(model_name: str, layer: str):
assert _ModelCatalogue.get_layer_name(model_name) == layer
@pytest.mark.parametrize(
['model_name', 'dim'],
[
('mobilenet_v2', 1280),
('resnet18', 512)
]
)
def test_get_features(model_name: str, dim: int):
model_wrapper = EmbeddingModelWrapper(model_name)
embeddings = model_wrapper.compute_embeddings(
np.ones((10, 3, 224, 224), dtype=np.float32)
)
assert embeddings.shape == (10, dim)
@pytest.mark.parametrize(
'feature_map',
[
np.ones((1, 10, 10, 3)),
np.random.rand(1, 224, 224, 3),
np.zeros((1, 100, 100, 3))
]
)
def test_get_pooling(
feature_map: np.ndarray,
):
wrapper = EmbeddingModelWrapper('mobilenet_v2')
feature_map_after_pooling = wrapper._pooling_function(torch.from_numpy(feature_map))
np.testing.assert_array_almost_equal(feature_map_after_pooling, np.mean(feature_map, axis=(2, 3)))
@pytest.mark.skipif(not torch.cuda.is_available(),
reason='requires GPU and CUDA')
def test_get_features_gpu():
wrapper = EmbeddingModelWrapper('mobilenet_v2')
arr_in = np.ones((2, 3, 10, 10), dtype=np.float32)
encodings = wrapper.get_features(torch.from_numpy(arr_in).to(wrapper.device)).detach().cpu().numpy()
assert encodings.shape == (2, 1280, 1, 1)
def test_get_features_cpu():
wrapper = EmbeddingModelWrapper('mobilenet_v2', device='cpu')
arr_in = np.ones((2, 3, 224, 224), dtype=np.float32)
encodings = wrapper.get_features(torch.from_numpy(arr_in)).detach().numpy()
assert encodings.shape[1] == 1280
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
import numpy as np
import torch
from jinahub.image.encoder.models import EmbeddingModelWrapper, _ModelCatalogue
@pytest.mark.parametrize(
['model_name', 'is_supported'],
[
('ResNet', False),
('resnet18', True),
('resnet50', True),
('alexnet', True),
('xResNet', False),
('Alexnet', False)
]
)
def test_is_model_supported(model_name: str, is_supported: bool):
assert _ModelCatalogue.is_model_supported(model_name) == is_supported
@pytest.mark.parametrize(
['model_name', 'layer'],
[
('alexnet', 'features'),
('vgg11', 'features'),
('squeezenet1_0', 'features'),
('densenet121', 'features'),
('mnasnet0_5', 'layers'),
('mobilenet_v2', 'features'),
]
)
def test_is_correct_layer(model_name: str, layer: str):
assert _ModelCatalogue.get_layer_name(model_name) == layer
@pytest.mark.parametrize(
['model_name', 'dim'],
[
('mobilenet_v2', 1280),
('resnet18', 512)
]
)
def test_get_features(model_name: str, dim: int):
model_wrapper = EmbeddingModelWrapper(model_name)
embeddings = model_wrapper.compute_embeddings(
np.ones((10, 3, 224, 224), dtype=np.float32)
)
assert embeddings.shape == (10, dim)
@pytest.mark.parametrize(
'feature_map',
[
np.ones((1, 10, 10, 3)),
np.random.rand(1, 224, 224, 3),
np.zeros((1, 100, 100, 3))
]
)
def test_get_pooling(
feature_map: np.ndarray,
):
wrapper = EmbeddingModelWrapper('mobilenet_v2')
feature_map_after_pooling = wrapper._pooling_function(torch.from_numpy(feature_map))
np.testing.assert_array_almost_equal(feature_map_after_pooling, np.mean(feature_map, axis=(2, 3)))
@pytest.mark.skipif(not torch.cuda.is_available(),
reason='requires GPU and CUDA')
def test_get_features_gpu():
wrapper = EmbeddingModelWrapper('mobilenet_v2')
arr_in = np.ones((2, 3, 10, 10), dtype=np.float32)
encodings = wrapper.get_features(torch.from_numpy(arr_in).to(wrapper.device)).detach().cpu().numpy()
assert encodings.shape == (2, 1280, 1, 1)
def test_get_features_cpu():
wrapper = EmbeddingModelWrapper('mobilenet_v2', device='cpu')
arr_in = np.ones((2, 3, 224, 224), dtype=np.float32)
encodings = wrapper.get_features(torch.from_numpy(arr_in)).detach().numpy()
assert encodings.shape[1] == 1280
|
"""
Demo for using cross validation
===============================
"""
import os
import numpy as np
import xgboost as xgb
# load data and do training
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
num_round = 2
print("running cross validation")
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(
param,
dtrain,
num_round,
nfold=5,
metrics={"error"},
seed=0,
callbacks=[xgb.callback.EvaluationMonitor(show_stdv=True)],
)
print("running cross validation, disable standard deviation display")
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value
res = xgb.cv(
param,
dtrain,
num_boost_round=10,
nfold=5,
metrics={"error"},
seed=0,
callbacks=[
xgb.callback.EvaluationMonitor(show_stdv=False),
xgb.callback.EarlyStopping(3),
],
)
print(res)
print("running cross validation, with preprocessing function")
# define the preprocessing function
# used to return the preprocessed training, test data, and parameter
# we can use this to do weight rescale, etc.
# as an example, we try to set scale_pos_weight
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param["scale_pos_weight"] = ratio
return (dtrain, dtest, param)
# do cross validation, for each fold
# the dtrain, dtest, param will be passed into fpreproc
# then the return value of fpreproc will be used to generate
# results of that fold
xgb.cv(param, dtrain, num_round, nfold=5, metrics={"auc"}, seed=0, fpreproc=fpreproc)
###
# you can also do cross validation with customized loss function
# See custom_objective.py
##
print("running cross validation, with customized loss function")
def logregobj(preds, dtrain):
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
grad = preds - labels
hess = preds * (1.0 - preds)
return grad, hess
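# For the logistic loss l(y, m) = -[y * log(p) + (1 - y) * log(1 - p)] with
# p = sigmoid(m), the gradient w.r.t. the margin m is (p - y) and the hessian is
# p * (1 - p), which is exactly what logregobj returns for each row.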
def evalerror(preds, dtrain):
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
# after the sigmoid transform the decision threshold is 0.5, not 0.0
return "error", float(sum(labels != (preds > 0.5))) / len(labels)
param = {"max_depth": 2, "eta": 1}
# train with customized objective
xgb.cv(
param, dtrain, num_round, nfold=5, seed=0, obj=logregobj, custom_metric=evalerror
)
|
"""
Demo for using cross validation
===============================
"""
import os
import numpy as np
import xgboost as xgb
# load data and do training
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
num_round = 2
print("running cross validation")
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(
param,
dtrain,
num_round,
nfold=5,
metrics={"error"},
seed=0,
callbacks=[xgb.callback.EvaluationMonitor(show_stdv=True)],
)
print("running cross validation, disable standard deviation display")
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value
res = xgb.cv(
param,
dtrain,
num_boost_round=10,
nfold=5,
metrics={"error"},
seed=0,
callbacks=[
xgb.callback.EvaluationMonitor(show_stdv=False),
xgb.callback.EarlyStopping(3),
],
)
print(res)
print("running cross validation, with preprocessing function")
# define the preprocessing function
# used to return the preprocessed training, test data, and parameter
# we can use this to do weight rescale, etc.
# as an example, we try to set scale_pos_weight
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param["scale_pos_weight"] = ratio
return (dtrain, dtest, param)
# do cross validation, for each fold
# the dtrain, dtest, param will be passed into fpreproc
# then the return value of fpreproc will be used to generate
# results of that fold
xgb.cv(param, dtrain, num_round, nfold=5, metrics={"auc"}, seed=0, fpreproc=fpreproc)
###
# you can also do cross validation with customized loss function
# See custom_objective.py
##
print("running cross validation, with customized loss function")
def logregobj(preds, dtrain):
labels = dtrain.get_label()
preds = 1.0 / (1.0 + np.exp(-preds))
grad = preds - labels
hess = preds * (1.0 - preds)
return grad, hess
def evalerror(preds, dtrain):
labels = dtrain.get_label()
return "error", float(sum(labels != (preds > 0.0))) / len(labels)
param = {"max_depth": 2, "eta": 1}
# train with customized objective
xgb.cv(param, dtrain, num_round, nfold=5, seed=0, obj=logregobj, feval=evalerror)
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import nn
from sentence_transformers.models.Module import Module
class LSTM(Module):
"""Bidirectional LSTM running over word embeddings."""
config_keys: list[str] = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
config_file_name: str = "lstm_config.json"
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
super().__init__()
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
# Saving LSTM models with Safetensors does not work unless the weights are on CPU
# See https://github.com/UKPLab/sentence-transformers/pull/2722
device = next(self.parameters()).device
self.cpu()
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
self.to(device)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
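# --- Usage sketch (illustrative; the token embeddings below are random placeholders,
# no tokenizer or trained weights are assumed) ---
# The module consumes pre-computed token embeddings plus per-sentence lengths and
# returns contextualised token embeddings of size 2 * hidden_dim when bidirectional.
if __name__ == "__main__":
    lstm = LSTM(word_embedding_dimension=300, hidden_dim=128)
    features = {
        "token_embeddings": torch.randn(2, 5, 300),  # (batch, seq_len, emb_dim)
        "sentence_lengths": torch.tensor([5, 3]),  # true length of each sentence
    }
    out = lstm(features)
    assert out["token_embeddings"].shape == (2, 5, 256)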
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json")) as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.dot_score) -> None:
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
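# Note: the default similarity is the dot product rather than cosine similarity;
# sparse lexical embeddings carry term weights in their magnitudes, and cosine
# similarity would normalise that information away.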
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Activation")
class Activation(Layer):
"""Applies an activation function to an output.
Args:
activation: Activation function. It could be a callable, or the name of
an activation from the `keras.activations` namespace.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
>>> layer = keras.layers.Activation('relu')
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
>>> layer = keras.layers.Activation(keras.activations.relu)
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
"""
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
self.built = True
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {"activation": activations.serialize(self.activation)}
base_config = super().get_config()
return {**base_config, **config}
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Activation")
class Activation(Layer):
"""Applies an activation function to an output.
Args:
activation: Activation function. It could be a callable, or the name of
an activation from the `keras.activations` namespace.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
>>> layer = keras.layers.Activation('relu')
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
>>> layer = keras.layers.Activation(keras.activations.relu)
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
"""
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {"activation": activations.serialize(self.activation)}
base_config = super().get_config()
return {**base_config, **config}
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
written = self._write(**self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql,
[(offset, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
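# Design note: `_write` slices the Arrow table into `batch_size`-row chunks and,
# when `num_proc` > 1, fans the per-chunk `DataFrame.to_sql` calls out over a
# process pool; every chunk after the first is written with `if_exists="append"`,
# so the target table is created once and then extended.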
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
ignore_verifications = False
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
written = self._write(**self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql,
[(offset, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
|
"""Image prompt template for a multimodal model."""
from typing import Any
from pydantic import Field
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
PromptTemplateFormat,
)
from langchain_core.runnables import run_in_executor
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
"""Image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
template_format: PromptTemplateFormat = "f-string"
"""The format of the prompt template.
Options are: 'f-string', 'mustache', 'jinja2'."""
def __init__(self, **kwargs: Any) -> None:
"""Create an image prompt template."""
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"}
if overlap:
msg = (
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
raise ValueError(msg)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=self.format(**kwargs))
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=await self.aformat(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the url is not provided.
ValueError: If the url is not a string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = DEFAULT_FORMATTER_MAPPING[self.template_format](
v, **kwargs
)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
if kwargs.get("path") or formatted.get("path"):
msg = (
"Loading images from 'path' has been removed as of 0.3.15 for security "
"reasons. Please specify images by 'url'."
)
raise ValueError(msg)
detail = kwargs.get("detail") or formatted.get("detail")
if not url:
msg = "Must provide url."
raise ValueError(msg)
elif not isinstance(url, str):
msg = "url must be a string."
raise ValueError(msg)
else:
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail # type: ignore[typeddict-item]
return output
async def aformat(self, **kwargs: Any) -> ImageURL:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the path or url is not a string.
"""
return await run_in_executor(None, self.format, **kwargs)
def pretty_repr(self, html: bool = False) -> str:
"""Return a pretty representation of the prompt.
Args:
html: Whether to return an html formatted string.
Returns:
A pretty representation of the prompt.
"""
raise NotImplementedError
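# --- Usage sketch (illustrative; the URL pattern and variable name are placeholders) ---
# The template dict is formatted with the prompt variables and must resolve to at
# least a "url" entry; a "detail" entry is passed through untouched.
if __name__ == "__main__":
    prompt = ImagePromptTemplate(
        template={"url": "https://example.com/{image_name}.png", "detail": "low"},
        input_variables=["image_name"],
    )
    image_url = prompt.format(image_name="cat")
    # -> {"url": "https://example.com/cat.png", "detail": "low"}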
|
from typing import Any
from pydantic import Field
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
PromptTemplateFormat,
)
from langchain_core.runnables import run_in_executor
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
"""Image prompt template for a multimodal model."""
template: dict = Field(default_factory=dict)
"""Template for the prompt."""
template_format: PromptTemplateFormat = "f-string"
"""The format of the prompt template.
Options are: 'f-string', 'mustache', 'jinja2'."""
def __init__(self, **kwargs: Any) -> None:
if "input_variables" not in kwargs:
kwargs["input_variables"] = []
overlap = set(kwargs["input_variables"]) & {"url", "path", "detail"}
if overlap:
msg = (
"input_variables for the image template cannot contain"
" any of 'url', 'path', or 'detail'."
f" Found: {overlap}"
)
raise ValueError(msg)
super().__init__(**kwargs)
@property
def _prompt_type(self) -> str:
"""Return the prompt type key."""
return "image-prompt"
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "prompts", "image"]
def format_prompt(self, **kwargs: Any) -> PromptValue:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=self.format(**kwargs))
async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
"""
return ImagePromptValue(image_url=await self.aformat(**kwargs))
def format(
self,
**kwargs: Any,
) -> ImageURL:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the url is not provided.
ValueError: If the url is not a string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
formatted = {}
for k, v in self.template.items():
if isinstance(v, str):
formatted[k] = DEFAULT_FORMATTER_MAPPING[self.template_format](
v, **kwargs
)
else:
formatted[k] = v
url = kwargs.get("url") or formatted.get("url")
if kwargs.get("path") or formatted.get("path"):
msg = (
"Loading images from 'path' has been removed as of 0.3.15 for security "
"reasons. Please specify images by 'url'."
)
raise ValueError(msg)
detail = kwargs.get("detail") or formatted.get("detail")
if not url:
msg = "Must provide url."
raise ValueError(msg)
elif not isinstance(url, str):
msg = "url must be a string."
raise ValueError(msg)
else:
output: ImageURL = {"url": url}
if detail:
# Don't check literal values here: let the API check them
output["detail"] = detail # type: ignore[typeddict-item]
return output
async def aformat(self, **kwargs: Any) -> ImageURL:
"""Async format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Raises:
ValueError: If the path or url is not a string.
"""
return await run_in_executor(None, self.format, **kwargs)
def pretty_repr(self, html: bool = False) -> str:
"""Return a pretty representation of the prompt.
Args:
html: Whether to return an html formatted string.
Returns:
A pretty representation of the prompt.
"""
raise NotImplementedError
|
"""Wordpress reader."""
import warnings
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class WordpressReader(BaseReader):
"""
Wordpress reader. Reads data from a WordPress site.
Args:
url (str): Base URL of the WordPress site.
username (Optional[str]): WordPress username for authentication.
password (Optional[str]): WordPress password for authentication.
get_pages (bool): Retrieve static WordPress 'pages'. Default True.
get_posts (bool): Retrieve WordPress 'posts' (blog entries). Default True.
additional_post_types (Optional[str]): Comma-separated list of additional post types to retrieve
(e.g., 'my-custom-page,webinars'). Default is None.
"""
def __init__(
self,
url: str,
username: Optional[str] = None,
password: Optional[str] = None,
get_pages: bool = True,
get_posts: bool = True,
additional_post_types: Optional[str] = None,
) -> None:
"""Initialize Wordpress reader."""
self.url = url
self.username = username
self.password = password
# Use a set to prevent duplicates
self.post_types = set()
# Add default types based on backward-compatible options
if get_pages:
self.post_types.add("pages")
if get_posts:
self.post_types.add("posts")
# Add any additional post types specified
if additional_post_types:
self.post_types.update(
post_type.strip() for post_type in additional_post_types.split(",")
)
# Convert post_types back to a list
self.post_types = list(self.post_types)
def load_data(self) -> List[Document]:
"""
Load data from the specified post types.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup, GuessedAtParserWarning
# Suppressing this warning because guessing at the parser is the
# desired behavior -- we don't want to force lxml on packages
# where it's not installed.
warnings.filterwarnings("ignore", category=GuessedAtParserWarning)
results = []
articles = []
# Fetch articles for each specified post type
for post_type in self.post_types:
articles.extend(self.get_all_posts(post_type))
# Process each article to extract content and metadata
for article in articles:
body = article.get("content", {}).get("rendered", None)
if body is None:
body = article.get("content")
soup = BeautifulSoup(body)
body = soup.get_text()
title = article.get("title", {}).get("rendered", None) or article.get(
"title"
)
extra_info = {
"id": article["id"],
"title": title,
"url": article["link"],
"updated_at": article["modified"],
}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
def get_all_posts(self, post_type: str) -> List[dict]:
"""Retrieve all posts of a specific type, handling pagination."""
posts = []
next_page = 1
while True:
response = self.get_posts_page(post_type, next_page)
posts.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return posts
def get_posts_page(self, post_type: str, current_page: int = 1) -> dict:
"""Retrieve a single page of posts for a given post type."""
import requests
url = f"{self.url}/wp-json/wp/v2/{post_type}?per_page=100&page={current_page}"
# Handle authentication if username and password are provided
auth = (
(self.username, self.password) if self.username and self.password else None
)
response = requests.get(url, auth=auth)
response.raise_for_status() # Raise an error for bad responses
headers = response.headers
num_pages = int(headers.get("X-WP-TotalPages", 1))
next_page = current_page + 1 if num_pages > current_page else None
articles = response.json()
return {"articles": articles, "next_page": next_page}
|
"""Wordpress reader."""
import warnings
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class WordpressReader(BaseReader):
"""Wordpress reader. Reads data from a Wordpress workspace.
Args:
url (str): Base URL of the WordPress site.
username (Optional[str]): WordPress username for authentication.
password (Optional[str]): WordPress password for authentication.
get_pages (bool): Retrieve static WordPress 'pages'. Default True.
get_posts (bool): Retrieve WordPress 'posts' (blog entries). Default True.
additional_post_types (Optional[str]): Comma-separated list of additional post types to retrieve
(e.g., 'my-custom-page,webinars'). Default is None.
"""
def __init__(
self,
url: str,
username: Optional[str] = None,
password: Optional[str] = None,
get_pages: bool = True,
get_posts: bool = True,
additional_post_types: Optional[str] = None,
) -> None:
"""Initialize Wordpress reader."""
self.url = url
self.username = username
self.password = password
# Use a set to prevent duplicates
self.post_types = set()
# Add default types based on backward-compatible options
if get_pages:
self.post_types.add("pages")
if get_posts:
self.post_types.add("posts")
# Add any additional post types specified
if additional_post_types:
self.post_types.update(
post_type.strip() for post_type in additional_post_types.split(",")
)
# Convert post_types back to a list
self.post_types = list(self.post_types)
def load_data(self) -> List[Document]:
"""Load data from the specified post types.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup, GuessedAtParserWarning
# Suppressing this warning because guessing at the parser is the
# desired behavior -- we don't want to force lxml on packages
# where it's not installed.
warnings.filterwarnings("ignore", category=GuessedAtParserWarning)
results = []
articles = []
# Fetch articles for each specified post type
for post_type in self.post_types:
articles.extend(self.get_all_posts(post_type))
# Process each article to extract content and metadata
for article in articles:
body = article.get("content", {}).get("rendered", None)
if body is None:
body = article.get("content")
soup = BeautifulSoup(body)
body = soup.get_text()
title = article.get("title", {}).get("rendered", None) or article.get(
"title"
)
extra_info = {
"id": article["id"],
"title": title,
"url": article["link"],
"updated_at": article["modified"],
}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
def get_all_posts(self, post_type: str) -> List[dict]:
"""Retrieve all posts of a specific type, handling pagination."""
posts = []
next_page = 1
while True:
response = self.get_posts_page(post_type, next_page)
posts.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return posts
def get_posts_page(self, post_type: str, current_page: int = 1) -> dict:
"""Retrieve a single page of posts for a given post type."""
import requests
url = f"{self.url}/wp-json/wp/v2/{post_type}?per_page=100&page={current_page}"
# Handle authentication if username and password are provided
auth = (
(self.username, self.password) if self.username and self.password else None
)
response = requests.get(url, auth=auth)
response.raise_for_status() # Raise an error for bad responses
headers = response.headers
num_pages = int(headers.get("X-WP-TotalPages", 1))
next_page = current_page + 1 if num_pages > current_page else None
articles = response.json()
return {"articles": articles, "next_page": next_page}
|
_base_ = './point-rend_r50-caffe_fpn_ms-1x_coco.py'
max_epochs = 36
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
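# 3x schedule: a 500-iteration linear warm-up, after which the learning rate is
# multiplied by 0.1 after epoch 28 and again after epoch 34 (training ends at 36).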
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py'
max_epochs = 36
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.api_export import keras_export
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
BackendVariable = Variable # noqa: F405
@keras_export("keras.Variable")
class Variable(BackendVariable):
pass
backend_name_scope = name_scope # noqa: F405
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
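# `keras.Variable` and `keras.name_scope` are thin subclasses of whatever the active
# backend module defines, so the public symbols stay stable while the implementation
# is swapped per backend.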
@keras_export("keras.device")
def device(device_name):
return device_scope(device_name) # noqa: F405
|
from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
|
from ._dsp import (
adsr_envelope,
exp_sigmoid,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from ._rir import simulate_rir_ism
from .functional import barkscale_fbanks
__all__ = [
"adsr_envelope",
"exp_sigmoid",
"barkscale_fbanks",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"sinc_impulse_response",
"simulate_rir_ism",
]
|
from ._dsp import (
adsr_envelope,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from ._rir import simulate_rir_ism
from .functional import barkscale_fbanks
__all__ = [
"adsr_envelope",
"barkscale_fbanks",
"extend_pitch",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"sinc_impulse_response",
"simulate_rir_ism",
]
|
# mypy: allow-untyped-defs
"""torch.multiprocessing is a wrapper around the native :mod:`multiprocessing` module.
It registers custom reducers that use shared memory to provide shared
views on the same data in different processes. Once the tensor/storage is moved
to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
to send it to other processes without making any copies.
The API is 100% compatible with the original module - it's enough to change
``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
tensors sent through the queues or shared via other mechanisms, moved to shared
memory.
Because of the similarity of APIs, we do not document most of this package's
contents, and we recommend referring to the very good docs of the original module.
"""
import multiprocessing
import sys
import torch
from .reductions import init_reductions
__all__ = ["set_sharing_strategy", "get_sharing_strategy", "get_all_sharing_strategies"]
from multiprocessing import * # noqa: F403
__all__ += multiprocessing.__all__ # noqa: PLE0605 type: ignore[attr-defined]
# This call adds a Linux specific prctl(2) wrapper function to this module.
# See https://github.com/pytorch/pytorch/pull/14391 for more information.
torch._C._multiprocessing_init()
"""Add helper function to spawn N processes and wait for completion of any of
them. This depends `mp.get_context` which was added in Python 3.4."""
from .spawn import (
ENV_VAR_PARALLEL_START,
ProcessContext,
ProcessExitedException,
ProcessRaisedException,
spawn,
SpawnContext,
start_processes,
)
if sys.platform == "darwin" or sys.platform == "win32":
_sharing_strategy = "file_system"
_all_sharing_strategies = {"file_system"}
else:
_sharing_strategy = "file_descriptor"
_all_sharing_strategies = {"file_descriptor", "file_system"}
def set_sharing_strategy(new_strategy):
"""Set the strategy for sharing CPU tensors.
Args:
new_strategy (str): Name of the selected strategy. Should be one of
the values returned by :func:`get_all_sharing_strategies()`.
"""
global _sharing_strategy
assert new_strategy in _all_sharing_strategies
_sharing_strategy = new_strategy
def get_sharing_strategy():
"""Return the current strategy for sharing CPU tensors."""
return _sharing_strategy
def get_all_sharing_strategies():
"""Return a set of sharing strategies supported on a current system."""
return _all_sharing_strategies
def _set_thread_name(name: str) -> None:
"""Set the name of the current thread.
Args:
name (str): Name of the current thread.
"""
torch._C._set_thread_name(name)
def _get_thread_name() -> str:
"""Get the name of the current thread.
Returns:
str: Name of the current thread.
"""
return torch._C._get_thread_name()
init_reductions()
# Leak ResourceTracker at exit for Python-3.12 on MacOS
# See https://github.com/pytorch/pytorch/issues/153050 and
# https://github.com/python/cpython/issues/88887 for more details
from multiprocessing.resource_tracker import ResourceTracker as _RT
if (
sys.platform == "darwin"
and sys.version_info >= (3, 12, 2)
and hasattr(_RT, "__del__")
):
import atexit
def _leak_RT_at_exit():
def _noop(x):
pass
_RT.__del__ = _noop # type: ignore[attr-defined]
atexit.register(_leak_RT_at_exit)
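Because the module star-imports the stdlib `multiprocessing` and adds the sharing-strategy helpers and `spawn` defined above, a typical entry point looks like the hedged sketch below (the worker body is purely illustrative):

# Minimal sketch of the public API defined above: query/set the sharing strategy,
# move a tensor to shared memory, then fan out workers with spawn().
import torch
import torch.multiprocessing as mp

def _worker(rank, shared):
    # Every process sees the same underlying storage.
    shared[rank] = rank

if __name__ == "__main__":
    print(mp.get_all_sharing_strategies())  # e.g. {'file_descriptor', 'file_system'}
    mp.set_sharing_strategy("file_system")  # must be one of the strategies above
    shared = torch.zeros(4).share_memory_()
    mp.spawn(_worker, args=(shared,), nprocs=4, join=True)
    print(shared)  # tensor([0., 1., 2., 3.])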
|
# mypy: allow-untyped-defs
"""torch.multiprocessing is a wrapper around the native :mod:`multiprocessing` module.
It registers custom reducers that use shared memory to provide shared
views on the same data in different processes. Once the tensor/storage is moved
to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
to send it to other processes without making any copies.
The API is 100% compatible with the original module - it's enough to change
``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
tensors sent through the queues or shared via other mechanisms, moved to shared
memory.
Because of the similarity of APIs, we do not document most of this package's
contents, and we recommend referring to the very good docs of the original module.
"""
import multiprocessing
import sys
import torch
from .reductions import init_reductions
__all__ = ["set_sharing_strategy", "get_sharing_strategy", "get_all_sharing_strategies"]
from multiprocessing import * # noqa: F403
__all__ += multiprocessing.__all__ # noqa: PLE0605 type: ignore[attr-defined]
# This call adds a Linux specific prctl(2) wrapper function to this module.
# See https://github.com/pytorch/pytorch/pull/14391 for more information.
torch._C._multiprocessing_init()
"""Add helper function to spawn N processes and wait for completion of any of
them. This depends `mp.get_context` which was added in Python 3.4."""
from .spawn import (
ENV_VAR_PARALLEL_START,
ProcessContext,
ProcessExitedException,
ProcessRaisedException,
spawn,
SpawnContext,
start_processes,
)
if sys.platform == "darwin" or sys.platform == "win32":
_sharing_strategy = "file_system"
_all_sharing_strategies = {"file_system"}
else:
_sharing_strategy = "file_descriptor"
_all_sharing_strategies = {"file_descriptor", "file_system"}
def set_sharing_strategy(new_strategy):
"""Set the strategy for sharing CPU tensors.
Args:
new_strategy (str): Name of the selected strategy. Should be one of
the values returned by :func:`get_all_sharing_strategies()`.
"""
global _sharing_strategy
assert new_strategy in _all_sharing_strategies
_sharing_strategy = new_strategy
def get_sharing_strategy():
"""Return the current strategy for sharing CPU tensors."""
return _sharing_strategy
def get_all_sharing_strategies():
"""Return a set of sharing strategies supported on a current system."""
return _all_sharing_strategies
def _set_thread_name(name: str) -> None:
"""Set the name of the current thread.
Args:
name (str): Name of the current thread.
"""
torch._C._set_thread_name(name)
def _get_thread_name() -> str:
"""Get the name of the current thread.
Returns:
str: Name of the current thread.
"""
return torch._C._get_thread_name()
init_reductions()
|
"""Example selectors.
**Example selector** implements logic for selecting examples to include in prompts.
This allows us to select examples that are most relevant to the input.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.example_selectors.length_based import (
LengthBasedExampleSelector,
)
from langchain_core.example_selectors.semantic_similarity import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
sorted_values,
)
__all__ = (
"BaseExampleSelector",
"LengthBasedExampleSelector",
"MaxMarginalRelevanceExampleSelector",
"SemanticSimilarityExampleSelector",
"sorted_values",
)
_dynamic_imports = {
"BaseExampleSelector": "base",
"LengthBasedExampleSelector": "length_based",
"MaxMarginalRelevanceExampleSelector": "semantic_similarity",
"SemanticSimilarityExampleSelector": "semantic_similarity",
"sorted_values": "semantic_similarity",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
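The `__getattr__`/`__dir__` pair above is the PEP 562 lazy-import pattern: a submodule is only imported when one of its attributes is first accessed, and the result is cached in `globals()`. Below is a stripped-down, self-contained sketch of the same idea; the package and attribute names (`lazy_pkg`, `ExpensiveThing`, `heavy`) are hypothetical and not part of langchain:

# lazy_pkg/__init__.py -- hypothetical package illustrating the PEP 562 pattern above.
from importlib import import_module
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from lazy_pkg.heavy import ExpensiveThing  # only imported for type checkers

__all__ = ("ExpensiveThing",)
_dynamic_imports = {"ExpensiveThing": "heavy"}  # attribute name -> submodule name

def __getattr__(attr_name: str) -> object:
    module_name = _dynamic_imports.get(attr_name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {attr_name!r}")
    module = import_module(f".{module_name}", package=__spec__.parent)
    result = getattr(module, attr_name)
    globals()[attr_name] = result  # cache so __getattr__ is not hit again
    return result

def __dir__() -> list[str]:
    return list(__all__)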
|
"""Example selectors.
**Example selector** implements logic for selecting examples to include in prompts.
This allows us to select examples that are most relevant to the input.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.example_selectors.length_based import (
LengthBasedExampleSelector,
)
from langchain_core.example_selectors.semantic_similarity import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
sorted_values,
)
__all__ = [
"BaseExampleSelector",
"LengthBasedExampleSelector",
"MaxMarginalRelevanceExampleSelector",
"SemanticSimilarityExampleSelector",
"sorted_values",
]
_dynamic_imports = {
"BaseExampleSelector": "base",
"LengthBasedExampleSelector": "length_based",
"MaxMarginalRelevanceExampleSelector": "semantic_similarity",
"SemanticSimilarityExampleSelector": "semantic_similarity",
"sorted_values": "semantic_similarity",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""Init file."""
from llama_index.readers.kaltura_esearch.base import KalturaESearchReader
__all__ = ["KalturaESearchReader"]
|
"""Init file."""
from llama_index.readers.kaltura_esearch.base import KalturaESearchReader
__all__ = ["KalturaESearchReader"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pytest
from mmengine import Config, DefaultScope
from mmengine.hub import get_config, get_model
from mmengine.utils import get_installed_path, is_installed
data_path = osp.join(osp.dirname(osp.dirname(__file__)), 'data/')
# mmdet has a more typical config structure, while mmpose has a complex
# config structure
@pytest.mark.skipif(
not (is_installed('mmdet') and is_installed('mmpose')),
reason='mmdet and mmpose should be installed')
def test_get_config():
# Test load base config.
base_cfg = get_config('mmdet::_base_/models/faster-rcnn_r50_fpn.py')
package_path = get_installed_path('mmdet')
test_base_cfg = Config.fromfile(
osp.join(package_path, '.mim',
'configs/_base_/models/faster-rcnn_r50_fpn.py'))
assert test_base_cfg._cfg_dict == base_cfg._cfg_dict
# Test load faster_rcnn config
cfg = get_config('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
test_cfg = Config.fromfile(
osp.join(package_path, '.mim',
'configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'))
assert cfg._cfg_dict == test_cfg._cfg_dict
# Test pretrained
cfg = get_config(
'mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
assert cfg.model_path == 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa E301
# Test load mmpose
get_config(
'mmpose::face/2d_kpt_sview_rgb_img/deeppose/wflw/res50_wflw_256x256'
'.py')
@pytest.mark.skipif(
not is_installed('mmdet'), reason='mmdet and mmpose should be installed')
def test_get_model():
# TODO compatible with downstream codebase.
DefaultScope.get_instance('test_get_model', scope_name='test_scope')
get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
assert DefaultScope.get_current_instance().scope_name == 'test_scope'
DefaultScope._instance_dict.pop('test_get_model')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pytest
from mmengine import Config, DefaultScope
from mmengine.hub import get_config, get_model
from mmengine.utils import get_installed_path, is_installed
data_path = osp.join(osp.dirname(osp.dirname(__file__)), 'data/')
# mmdet has a more typical config structure, while mmpose has a complex
# config structure
@pytest.mark.skipif(
not (is_installed('mmdet') and is_installed('mmpose')),
reason='mmdet and mmpose should be installed')
def test_get_config():
# Test load base config.
base_cfg = get_config('mmdet::_base_/models/faster_rcnn_r50_fpn.py')
package_path = get_installed_path('mmdet')
test_base_cfg = Config.fromfile(
osp.join(package_path, '.mim',
'configs/_base_/models/faster_rcnn_r50_fpn.py'))
assert test_base_cfg._cfg_dict == base_cfg._cfg_dict
# Test load faster_rcnn config
cfg = get_config('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')
test_cfg = Config.fromfile(
osp.join(package_path, '.mim',
'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'))
assert cfg._cfg_dict == test_cfg._cfg_dict
# Test pretrained
cfg = get_config(
'mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', pretrained=True)
assert cfg.model_path == 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa E301
# Test load mmpose
get_config(
'mmpose::face/2d_kpt_sview_rgb_img/deeppose/wflw/res50_wflw_256x256'
'.py')
@pytest.mark.skipif(
not is_installed('mmdet'), reason='mmdet and mmpose should be installed')
def test_get_model():
# TODO compatible with downstream codebase.
DefaultScope.get_instance('test_get_model', scope_name='test_scope')
get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
assert DefaultScope.get_current_instance().scope_name == 'test_scope'
DefaultScope._instance_dict.pop('test_get_model')
|
"""
This script runs the evaluation (dev & test) for the AskUbuntu dataset
Usage:
python eval_askubuntu.py [sbert_model_name_or_path]
"""
import gzip
import logging
import os
import sys
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
model = SentenceTransformer(sys.argv[1])
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
training_corpus = os.path.join(askubuntu_folder, "train.unsupervised.txt")
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
logging.info("Dev performance before training")
dev_evaluator(model)
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Test performance before training")
test_evaluator(model)
|
"""
This script runs the evaluation (dev & test) for the AskUbuntu dataset
Usage:
python eval_askubuntu.py [sbert_model_name_or_path]
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import util, evaluation
import logging
import os
import gzip
import sys
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
model = SentenceTransformer(sys.argv[1])
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = 'askubuntu'
training_corpus = os.path.join(askubuntu_folder, 'train.unsupervised.txt')
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ['text_tokenized.txt.gz', 'dev.txt', 'test.txt', 'train_random.txt']:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get('https://github.com/taolei87/askubuntu/raw/master/'+filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, 'text_tokenized.txt.gz'), 'rt', encoding='utf8') as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: #Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append({
'query': corpus[query_id],
'positive': [corpus[pid] for pid in relevant_id],
'negative': [corpus[pid] for pid in negative_ids]
})
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'dev.txt'))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, 'test.txt'))
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
logging.info("Dev performance before training")
dev_evaluator(model)
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Test performance before training")
test_evaluator(model)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
assert isinstance(tensor, AudioNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
assert isinstance(tensor, AudioNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert str(proto).startswith('audio_url')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .base_detr import DetectionTransformer
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'CrowdDet', 'CondInst', 'BoxInst',
'DetectionTransformer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .boxinst import BoxInst
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .condinst import CondInst
from .cornernet import CornerNet
from .crowddet import CrowdDet
from .d2_wrapper import Detectron2Wrapper
from .ddod import DDOD
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .lad import LAD
from .mask2former import Mask2Former
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .maskformer import MaskFormer
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .queryinst import QueryInst
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .rtmdet import RTMDet
from .scnet import SCNet
from .semi_base import SemiBaseDetector
from .single_stage import SingleStageDetector
from .soft_teacher import SoftTeacher
from .solo import SOLO
from .solov2 import SOLOv2
from .sparse_rcnn import SparseRCNN
from .tood import TOOD
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',
'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',
'MaskFormer', 'DDOD', 'Mask2Former', 'SemiBaseDetector', 'SoftTeacher',
'RTMDet', 'Detectron2Wrapper', 'RTMDet', 'CrowdDet', 'CondInst', 'BoxInst'
]
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import ArrayType
def cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
    :param eps: a small jitter to avoid divide by zero
:return: np.ndarray with ndim=2
"""
return 1 - np.clip(
(np.dot(x_mat, y_mat.T) + eps)
/ (
np.outer(np.linalg.norm(x_mat, axis=1), np.linalg.norm(y_mat, axis=1)) + eps
),
-1,
1,
)
def sqeuclidean(x_mat: 'np.ndarray', y_mat: 'np.ndarray') -> 'np.ndarray':
"""Squared Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return (
np.sum(y_mat**2, axis=1)
+ np.sum(x_mat**2, axis=1)[:, np.newaxis]
- 2 * np.dot(x_mat, y_mat.T)
)
def sparse_cosine(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
from scipy.sparse.linalg import norm
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return 1 - np.clip(
np.asarray(
x_mat.dot(y_mat.T) / (np.outer(norm(x_mat, axis=1), norm(y_mat, axis=1)))
),
-1,
1,
)
def sparse_sqeuclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return np.asarray(
y_mat.power(2).sum(axis=1).flatten()
+ x_mat.power(2).sum(axis=1)
- 2 * x_mat.dot(y_mat.T)
)
def sparse_euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Sparse euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sparse_sqeuclidean(x_mat, y_mat))
def euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
    :param x_mat: np.ndarray with ndim=2
    :param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sqeuclidean(x_mat, y_mat))
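The dense `sqeuclidean` above relies on the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y; the snippet below is a standalone sanity check (numpy only, not part of the module) that the vectorised form matches the naive pairwise definition:

# Sanity check that the broadcasting trick used in sqeuclidean() above is correct.
import numpy as np

rng = np.random.default_rng(0)
x_mat = rng.normal(size=(4, 8))
y_mat = rng.normal(size=(5, 8))

fast = (
    np.sum(y_mat**2, axis=1)
    + np.sum(x_mat**2, axis=1)[:, np.newaxis]
    - 2 * np.dot(x_mat, y_mat.T)
)
naive = np.array([[np.sum((x - y) ** 2) for y in y_mat] for x in x_mat])
assert np.allclose(fast, naive)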
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from ...typing import ArrayType
def cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
    :param eps: a small jitter to avoid divide by zero
:return: np.ndarray with ndim=2
"""
return 1 - np.clip(
(np.dot(x_mat, y_mat.T) + eps)
/ (
np.outer(np.linalg.norm(x_mat, axis=1), np.linalg.norm(y_mat, axis=1)) + eps
),
-1,
1,
)
def sqeuclidean(x_mat: 'np.ndarray', y_mat: 'np.ndarray') -> 'np.ndarray':
"""Squared Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return (
np.sum(y_mat**2, axis=1)
+ np.sum(x_mat**2, axis=1)[:, np.newaxis]
- 2 * np.dot(x_mat, y_mat.T)
)
def sparse_cosine(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
from scipy.sparse.linalg import norm
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return 1 - np.clip(
np.asarray(
x_mat.dot(y_mat.T) / (np.outer(norm(x_mat, axis=1), norm(y_mat, axis=1)))
),
-1,
1,
)
def sparse_sqeuclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return np.asarray(
y_mat.power(2).sum(axis=1).flatten()
+ x_mat.power(2).sum(axis=1)
- 2 * x_mat.dot(y_mat.T)
)
def sparse_euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Sparse euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sparse_sqeuclidean(x_mat, y_mat))
def euclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
    :param x_mat: np.ndarray with ndim=2
    :param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return np.sqrt(sqeuclidean(x_mat, y_mat))
|
import enum
from typing import Any, Callable, Dict, List, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision.prototype.transforms._utils import _isinstance
from torchvision.utils import _log_api_usage_once
class Transform(nn.Module):
    # Class attribute defining transformed types. Other types are passed through without any transformation
# We support both Types and callables that are able to do further checks on the type of the input.
_transformed_types: Tuple[Union[Type, Callable[[Any], bool]], ...] = (torch.Tensor, PIL.Image.Image)
def __init__(self) -> None:
super().__init__()
_log_api_usage_once(self)
def _check_inputs(self, flat_inputs: List[Any]) -> None:
pass
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
return dict()
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
raise NotImplementedError
def forward(self, *inputs: Any) -> Any:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
self._check_inputs(flat_inputs)
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
def extra_repr(self) -> str:
extra = []
for name, value in self.__dict__.items():
if name.startswith("_") or name == "training":
continue
if not isinstance(value, (bool, int, float, str, tuple, list, enum.Enum)):
continue
extra.append(f"{name}={value}")
return ", ".join(extra)
class _RandomApplyTransform(Transform):
def __init__(self, p: float = 0.5) -> None:
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
super().__init__()
self.p = p
def forward(self, *inputs: Any) -> Any:
# We need to almost duplicate `Transform.forward()` here since we always want to check the inputs, but return
# early afterwards in case the random check triggers. The same result could be achieved by calling
# `super().forward()` after the random check, but that would call `self._check_inputs` twice.
inputs = inputs if len(inputs) > 1 else inputs[0]
flat_inputs, spec = tree_flatten(inputs)
self._check_inputs(flat_inputs)
if torch.rand(1) >= self.p:
return inputs
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
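Concrete transforms hook into the `forward` above by overriding `_get_params` (random state sampled once per call) and `_transform` (applied to every supported input of the flattened structure). The subclass below is an illustrative sketch, not a torchvision transform; it reuses the imports and the `Transform` base class from the file above and only handles tensors, so the `_transformed_types` check applies:

# Illustrative subclass showing the _get_params/_transform contract of Transform.
class RandomScale(Transform):
    def __init__(self, low: float = 0.5, high: float = 1.5) -> None:
        super().__init__()
        self.low = low
        self.high = high

    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
        # Sampled once per forward() call and shared by all inputs.
        return dict(factor=torch.empty(1).uniform_(self.low, self.high).item())

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return inpt * params["factor"]

t = RandomScale()
img, mask = torch.rand(3, 8, 8), torch.rand(1, 8, 8)
out_img, out_mask = t(img, mask)  # both scaled by the same random factor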
|
import enum
from typing import Any, Callable, Dict, List, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision.prototype import features
from torchvision.prototype.transforms._utils import _isinstance
from torchvision.utils import _log_api_usage_once
class Transform(nn.Module):
    # Class attribute defining transformed types. Other types are passed through without any transformation
_transformed_types: Tuple[Union[Type, Callable[[Any], bool]], ...] = (
features.is_simple_tensor,
features._Feature,
PIL.Image.Image,
)
def __init__(self) -> None:
super().__init__()
_log_api_usage_once(self)
def _check_inputs(self, flat_inputs: List[Any]) -> None:
pass
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
return dict()
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
raise NotImplementedError
def forward(self, *inputs: Any) -> Any:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
self._check_inputs(flat_inputs)
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
def extra_repr(self) -> str:
extra = []
for name, value in self.__dict__.items():
if name.startswith("_") or name == "training":
continue
if not isinstance(value, (bool, int, float, str, tuple, list, enum.Enum)):
continue
extra.append(f"{name}={value}")
return ", ".join(extra)
class _RandomApplyTransform(Transform):
def __init__(self, p: float = 0.5) -> None:
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
super().__init__()
self.p = p
def forward(self, *inputs: Any) -> Any:
# We need to almost duplicate `Transform.forward()` here since we always want to check the inputs, but return
# early afterwards in case the random check triggers. The same result could be achieved by calling
# `super().forward()` after the random check, but that would call `self._check_inputs` twice.
inputs = inputs if len(inputs) > 1 else inputs[0]
flat_inputs, spec = tree_flatten(inputs)
self._check_inputs(flat_inputs)
if torch.rand(1) >= self.p:
return inputs
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
|
from datetime import datetime, timedelta
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import comma_list
class DatetimeOutputParser(BaseOutputParser[datetime]):
"""Parse the output of an LLM call to a datetime."""
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that is used as the datetime format.
Update this to match the desired datetime format for your application.
"""
def get_format_instructions(self) -> str:
"""Returns the format instructions for the given format."""
if self.format == "%Y-%m-%dT%H:%M:%S.%fZ":
examples = comma_list(
[
"2023-07-04T14:30:00.000000Z",
"1999-12-31T23:59:59.999999Z",
"2025-01-01T00:00:00.000000Z",
]
)
else:
try:
now = datetime.now()
examples = comma_list(
[
now.strftime(self.format),
(now.replace(year=now.year - 1)).strftime(self.format),
(now - timedelta(days=1)).strftime(self.format),
]
)
except ValueError:
# Fallback if the format is very unusual
examples = f"e.g., a valid string in the format {self.format}"
return (
f"Write a datetime string that matches the "
f"following pattern: '{self.format}'.\n\n"
f"Examples: {examples}\n\n"
f"Return ONLY this string, no other words!"
)
def parse(self, response: str) -> datetime:
"""Parse a string into a datetime object."""
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
raise OutputParserException(
f"Could not parse datetime string: {response}"
) from e
@property
def _type(self) -> str:
return "datetime"
|
from datetime import datetime, timedelta
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import comma_list
class DatetimeOutputParser(BaseOutputParser[datetime]):
"""Parse the output of an LLM call to a datetime."""
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that is used as the datetime format.
Update this to match the desired datetime format for your application.
"""
def get_format_instructions(self) -> str:
"""Returns the format instructions for the given format."""
if self.format == "%Y-%m-%dT%H:%M:%S.%fZ":
examples = comma_list(
[
"2023-07-04T14:30:00.000000Z",
"1999-12-31T23:59:59.999999Z",
"2025-01-01T00:00:00.000000Z",
]
)
else:
try:
now = datetime.now()
examples = comma_list(
[
now.strftime(self.format),
(now.replace(year=now.year - 1)).strftime(self.format),
(now - timedelta(days=1)).strftime(self.format),
]
)
except ValueError:
# Fallback if the format is very unusual
examples = f"e.g., a valid string in the format {self.format}"
return (
f"Write a datetime string that matches the "
f"following pattern: '{self.format}'.\n\n"
f"Examples: {examples}\n\n"
f"Return ONLY this string, no other words!"
)
def parse(self, response: str) -> datetime:
"""Parse a string into a datetime object."""
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
raise OutputParserException(
f"Could not parse datetime string: {response}"
) from e
@property
def _type(self) -> str:
return "datetime"
|
import os
from pathlib import Path
from torchaudio.datasets import librispeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def get_mock_dataset(root_dir):
"""
    root_dir: directory of the mocked dataset
"""
mocked_data = []
dataset_dir = os.path.join(root_dir, librispeech.FOLDER_IN_ARCHIVE, librispeech.URL)
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 16000 # 16kHz
seed = 0
for speaker_id in range(5):
speaker_path = os.path.join(dataset_dir, str(speaker_id))
os.makedirs(speaker_path, exist_ok=True)
for chapter_id in range(3):
chapter_path = os.path.join(speaker_path, str(chapter_id))
os.makedirs(chapter_path, exist_ok=True)
trans_content = []
for utterance_id in range(10):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.wav"
path = os.path.join(chapter_path, filename)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
trans_content.append(f"{speaker_id}-{chapter_id}-{utterance_id:04d} {transcript}")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, chapter_id, utterance_id)
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(chapter_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
class LibriSpeechTestMixin(TempDirMixin):
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
@classmethod
def tearDownClass(cls):
# In case of test failure
cls.librispeech_cls._ext_audio = ".flac"
def _test_librispeech(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert chapter_id == self.samples[i][4]
assert utterance_id == self.samples[i][5]
num_samples += 1
assert num_samples == len(self.samples)
self.librispeech_cls._ext_audio = ".flac"
def test_librispeech_str(self):
self.librispeech_cls._ext_audio = ".wav"
dataset = self.librispeech_cls(self.root_dir)
self._test_librispeech(dataset)
def test_librispeech_path(self):
self.librispeech_cls._ext_audio = ".wav"
dataset = self.librispeech_cls(Path(self.root_dir))
self._test_librispeech(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import librispeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def get_mock_dataset(root_dir):
"""
    root_dir: directory of the mocked dataset
"""
mocked_data = []
dataset_dir = os.path.join(root_dir, librispeech.FOLDER_IN_ARCHIVE, librispeech.URL)
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 16000 # 16kHz
seed = 0
for speaker_id in range(5):
speaker_path = os.path.join(dataset_dir, str(speaker_id))
os.makedirs(speaker_path, exist_ok=True)
for chapter_id in range(3):
chapter_path = os.path.join(speaker_path, str(chapter_id))
os.makedirs(chapter_path, exist_ok=True)
trans_content = []
for utterance_id in range(10):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.wav"
path = os.path.join(chapter_path, filename)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
trans_content.append(f"{speaker_id}-{chapter_id}-{utterance_id:04d} {transcript}")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, transcript, speaker_id, chapter_id, utterance_id)
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(chapter_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
class LibriSpeechTestMixin(TempDirMixin):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
@classmethod
def tearDownClass(cls):
# In case of test failure
cls.librispeech_cls._ext_audio = ".flac"
def _test_librispeech(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert chapter_id == self.samples[i][4]
assert utterance_id == self.samples[i][5]
num_samples += 1
assert num_samples == len(self.samples)
self.librispeech_cls._ext_audio = ".flac"
def test_librispeech_str(self):
self.librispeech_cls._ext_audio = ".wav"
dataset = self.librispeech_cls(self.root_dir)
self._test_librispeech(dataset)
def test_librispeech_path(self):
self.librispeech_cls._ext_audio = ".wav"
dataset = self.librispeech_cls(Path(self.root_dir))
self._test_librispeech(dataset)
|
import os
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive
class EuroSAT(ImageFolder):
"""RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.
Args:
root (string): Root directory of dataset where ``root/eurosat`` exists.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self.root = os.path.expanduser(root)
self._base_folder = os.path.join(self.root, "eurosat")
self._data_folder = os.path.join(self._base_folder, "2750")
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(self._data_folder, transform=transform, target_transform=target_transform)
self.root = os.path.expanduser(root)
def __len__(self) -> int:
return len(self.samples)
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder)
def download(self) -> None:
if self._check_exists():
return
os.makedirs(self._base_folder, exist_ok=True)
download_and_extract_archive(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
download_root=self._base_folder,
md5="c8fa014336c82ac7804f0398fcb19387",
)
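Usage then mirrors any other `ImageFolder`-style dataset. A hedged sketch (assumes `torchvision.transforms` is available and the download URL above is reachable):

# Illustrative usage of the EuroSAT class defined above.
from torchvision import transforms

dataset = EuroSAT(
    root="data",
    transform=transforms.Compose([transforms.Resize(64), transforms.ToTensor()]),
    download=True,  # fetches and extracts EuroSAT.zip into data/eurosat on first use
)
img, label = dataset[0]
print(len(dataset), img.shape, dataset.classes[label])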
|
import os
from typing import Callable, Optional
from .folder import ImageFolder
from .utils import download_and_extract_archive
class EuroSAT(ImageFolder):
"""RGB version of the `EuroSAT <https://github.com/phelber/eurosat>`_ Dataset.
Args:
root (string): Root directory of dataset where ``root/eurosat`` exists.
        transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self.root = os.path.expanduser(root)
self._base_folder = os.path.join(self.root, "eurosat")
self._data_folder = os.path.join(self._base_folder, "2750")
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(self._data_folder, transform=transform, target_transform=target_transform)
self.root = os.path.expanduser(root)
def __len__(self) -> int:
return len(self.samples)
def _check_exists(self) -> bool:
return os.path.exists(self._data_folder)
def download(self) -> None:
if self._check_exists():
return
os.makedirs(self._base_folder, exist_ok=True)
download_and_extract_archive(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
download_root=self._base_folder,
md5="c8fa014336c82ac7804f0398fcb19387",
)
|
from abc import ABC
import numpy as np
import pytest
from docarray import Document, DocumentArray
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.memory import SequenceLikeMixin
from docarray.array.storage.redis.getsetdel import GetSetDelMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
@pytest.fixture(scope='function')
def columns():
columns = {
'col_str': 'str',
'col_bytes': 'bytes',
'col_int': 'int',
'col_float': 'float',
'col_long': 'long',
'col_double': 'double',
}
return columns
@pytest.fixture(scope='function')
def da_redis(columns):
cfg = RedisConfig(n_dim=3, flush=True, columns=columns)
da_redis = DocumentArrayDummy(storage='redis', config=cfg)
return da_redis
@pytest.mark.parametrize(
'embedding', [[1, 2, 3], [1.0, 2.0, 3.0], [1, 2, 3, 4, 5], None]
)
@pytest.mark.parametrize('text', ['test_text', None])
@pytest.mark.parametrize(
'tag',
[
{'tag_1': 'tag1'},
{'tag_1': 'tag1', 'tag_2': 'tag2'},
{'tag_1': 'tag1', 'tag_2': 'tag2', 'tag_3': 'tag3'},
None,
],
)
@pytest.mark.parametrize(
'col',
[
{'col_str': 'hello', 'col_bytes': b'world'},
{'col_int': 1, 'col_float': 1.0},
{'col_long': 123, 'col_double': 1.1},
None,
],
)
def test_document_to_embedding(
embedding, text, tag, col, da_redis, columns, start_storage
):
tags = {}
if tag is not None:
tags.update(tag)
if col is not None:
tags.update(col)
doc = Document(embedding=embedding, text=text, tags=tags)
payload = da_redis._document_to_redis(doc)
if embedding is None:
assert np.allclose(
np.frombuffer(payload['embedding'], dtype=np.float32), np.zeros((3))
)
else:
assert np.allclose(
np.frombuffer(payload['embedding'], dtype=np.float32), np.array(embedding)
)
if text is None:
with pytest.raises(KeyError):
payload['text']
else:
assert payload['text'] == text
for col, _ in columns.items():
if col in tags:
assert payload[col] == tags[col]
else:
with pytest.raises(KeyError):
payload[col]
for key in tags:
if key not in (col for col in columns.keys()):
assert key not in payload
@pytest.mark.parametrize(
'doc',
[
Document(id='0'),
Document(id='1', text='hello world'),
Document(id='2', embedding=[1, 2, 3], tags={'tag_1': 'tag1', 'tag_2': 'tag2'}),
Document(
text='hello world',
embedding=[1, 2, 3],
tags={'tag_1': 'tag1', 'tag_2': 'tag2'},
chunks=[Document(text='token1'), Document(text='token2')],
),
],
)
def test_setgetdel_doc_by_id(doc, da_redis, start_storage):
da_redis._set_doc_by_id(doc.id, doc)
doc_get = da_redis._get_doc_by_id(doc.id)
assert doc == doc_get
da_redis._del_doc_by_id(doc.id)
with pytest.raises(KeyError):
da_redis._get_doc_by_id(doc.id)
def test_clear_storage(da_redis, start_storage):
for i in range(3):
doc = Document(id=str(i))
da_redis._set_doc_by_id(str(i), doc)
da_redis._clear_storage()
for i in range(3):
with pytest.raises(KeyError):
da_redis._get_doc_by_id(i)
def test_offset2ids(da_redis, start_storage):
ids = [str(i) for i in range(3)]
for id in ids:
doc = Document(id=id)
da_redis._set_doc_by_id(id, doc)
da_redis._offset2ids = Offset2ID(ids)
da_redis._save_offset2ids()
da_redis._load_offset2ids()
assert da_redis._offset2ids.ids == ids
|
from abc import ABC
import numpy as np
import pytest
from docarray import Document, DocumentArray
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.memory import SequenceLikeMixin
from docarray.array.storage.redis.getsetdel import GetSetDelMixin
from docarray.array.storage.redis.backend import BackendMixin, RedisConfig
class StorageMixins(BackendMixin, GetSetDelMixin, SequenceLikeMixin, ABC):
...
class DocumentArrayDummy(StorageMixins, DocumentArray):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def _load_offset2ids(self):
pass
def _save_offset2ids(self):
pass
@pytest.fixture(scope='function')
def columns():
columns = [
('col_str', 'str'),
('col_bytes', 'bytes'),
('col_int', 'int'),
('col_float', 'float'),
('col_long', 'long'),
('col_double', 'double'),
]
return columns
@pytest.fixture(scope='function')
def da_redis(columns):
cfg = RedisConfig(n_dim=3, flush=True, columns=columns)
da_redis = DocumentArrayDummy(storage='redis', config=cfg)
return da_redis
@pytest.mark.parametrize(
'embedding', [[1, 2, 3], [1.0, 2.0, 3.0], [1, 2, 3, 4, 5], None]
)
@pytest.mark.parametrize('text', ['test_text', None])
@pytest.mark.parametrize(
'tag',
[
{'tag_1': 'tag1'},
{'tag_1': 'tag1', 'tag_2': 'tag2'},
{'tag_1': 'tag1', 'tag_2': 'tag2', 'tag_3': 'tag3'},
None,
],
)
@pytest.mark.parametrize(
'col',
[
{'col_str': 'hello', 'col_bytes': b'world'},
{'col_int': 1, 'col_float': 1.0},
{'col_long': 123, 'col_double': 1.1},
None,
],
)
def test_document_to_embedding(
embedding, text, tag, col, da_redis, columns, start_storage
):
tags = {}
if tag is not None:
tags.update(tag)
if col is not None:
tags.update(col)
doc = Document(embedding=embedding, text=text, tags=tags)
payload = da_redis._document_to_redis(doc)
if embedding is None:
assert np.allclose(
np.frombuffer(payload['embedding'], dtype=np.float32), np.zeros((3))
)
else:
assert np.allclose(
np.frombuffer(payload['embedding'], dtype=np.float32), np.array(embedding)
)
if text is None:
with pytest.raises(KeyError):
payload['text']
else:
assert payload['text'] == text
for col, _ in columns:
if col in tags:
assert payload[col] == tags[col]
else:
with pytest.raises(KeyError):
payload[col]
for key in tags:
if key not in (col[0] for col in columns):
assert key not in payload
@pytest.mark.parametrize(
'doc',
[
Document(id='0'),
Document(id='1', text='hello world'),
Document(id='2', embedding=[1, 2, 3], tags={'tag_1': 'tag1', 'tag_2': 'tag2'}),
Document(
text='hello world',
embedding=[1, 2, 3],
tags={'tag_1': 'tag1', 'tag_2': 'tag2'},
chunks=[Document(text='token1'), Document(text='token2')],
),
],
)
def test_setgetdel_doc_by_id(doc, da_redis, start_storage):
da_redis._set_doc_by_id(doc.id, doc)
doc_get = da_redis._get_doc_by_id(doc.id)
assert doc == doc_get
da_redis._del_doc_by_id(doc.id)
with pytest.raises(KeyError):
da_redis._get_doc_by_id(doc.id)
def test_clear_storage(da_redis, start_storage):
for i in range(3):
doc = Document(id=str(i))
da_redis._set_doc_by_id(str(i), doc)
da_redis._clear_storage()
for i in range(3):
with pytest.raises(KeyError):
da_redis._get_doc_by_id(str(i))
def test_offset2ids(da_redis, start_storage):
ids = [str(i) for i in range(3)]
for id in ids:
doc = Document(id=id)
da_redis._set_doc_by_id(id, doc)
da_redis._offset2ids = Offset2ID(ids)
da_redis._save_offset2ids()
da_redis._load_offset2ids()
assert da_redis._offset2ids.ids == ids
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDocument):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
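# Hypothetical usage sketch (added for illustration, not part of the original
# module): `validate` above only accepts URLs whose path ends in one of
# IMAGE_FILE_FORMATS, so a plain-text URL is rejected when the document is built.
def _image_url_validation_demo() -> None:
    from docarray import BaseDocument

    class _Demo(BaseDocument):
        img_url: ImageUrl

    _Demo(img_url='https://example.com/picture.png')  # accepted: .png extension
    try:
        _Demo(img_url='https://example.com/notes.txt')  # rejected: not an image extension
    except ValueError:
        pass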
|
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
f'Image URL must have one of the following extensions: '
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDocument):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
from collections import OrderedDict
import torch
from mmengine.runner import CheckpointLoader
convert_dict_fpn = {
'module.backbone.fpn.fpn_inner2': 'neck.lateral_convs.0.conv',
'module.backbone.fpn.fpn_inner3': 'neck.lateral_convs.1.conv',
'module.backbone.fpn.fpn_inner4': 'neck.lateral_convs.2.conv',
'module.backbone.fpn.fpn_layer2': 'neck.fpn_convs.0.conv',
'module.backbone.fpn.fpn_layer3': 'neck.fpn_convs.1.conv',
'module.backbone.fpn.fpn_layer4': 'neck.fpn_convs.2.conv',
'module.backbone.fpn.top_blocks.p6': 'neck.fpn_convs.3.conv',
'module.backbone.fpn.top_blocks.p7': 'neck.fpn_convs.4.conv',
}
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
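# Hypothetical sanity check (added for illustration, not part of the original
# script): the two helpers above reorder the interleaved channel groups of Swin
# patch-merging weights; a tiny tensor makes the permutation easy to inspect.
def _unfold_order_demo():
    w = torch.arange(8, dtype=torch.float32).reshape(2, 4)  # out_channel=2, in_channel=4
    return correct_unfold_reduction_order(w)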
def convert(ckpt):
new_ckpt = OrderedDict()
for k, v in list(ckpt.items()):
if 'anchor_generator' in k or 'resizer' in k or 'cls_logits' in k:
continue
new_v = v
if 'module.backbone.body' in k:
new_k = k.replace('module.backbone.body', 'backbone')
if 'patch_embed.proj' in new_k:
new_k = new_k.replace('patch_embed.proj',
'patch_embed.projection')
elif 'pos_drop' in new_k:
new_k = new_k.replace('pos_drop', 'drop_after_pos')
if 'layers' in new_k:
new_k = new_k.replace('layers', 'stages')
if 'mlp.fc1' in new_k:
new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in new_k:
new_k = new_k.replace('mlp.fc2', 'ffn.layers.1')
elif 'attn' in new_k:
new_k = new_k.replace('attn', 'attn.w_msa')
if 'downsample' in k:
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
elif 'module.backbone.fpn' in k:
old_k = k.replace('.weight', '')
old_k = old_k.replace('.bias', '')
new_k = k.replace(old_k, convert_dict_fpn[old_k])
elif 'module.language_backbone' in k:
new_k = k.replace('module.language_backbone',
'language_model.language_backbone')
if 'pooler' in k:
continue
elif 'module.rpn' in k:
if 'module.rpn.head.scales' in k:
new_k = k.replace('module.rpn.head.scales',
'bbox_head.head.scales')
else:
new_k = k.replace('module.rpn', 'bbox_head')
if 'anchor_generator' in k and 'resizer' in k:
continue
else:
print('skip:', k)
continue
if 'DyConv' in new_k:
new_k = new_k.replace('DyConv', 'dyconvs')
if 'AttnConv' in new_k:
new_k = new_k.replace('AttnConv', 'attnconv')
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys to mmdet style.')
parser.add_argument(
'src', default='glip_a_tiny_o365.pth', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument(
'--dst', default='glip_tiny_a_mmdet.pth', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert(state_dict)
torch.save(weight, args.dst)
sha = subprocess.check_output(['sha256sum', args.dst]).decode()
final_file = args.dst.replace('.pth', '') + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', args.dst, final_file])
print(f'Done!!, save to {final_file}')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
from collections import OrderedDict
import torch
from mmengine.runner import CheckpointLoader
convert_dict_fpn = {
'module.backbone.fpn.fpn_inner2': 'neck.lateral_convs.0.conv',
'module.backbone.fpn.fpn_inner3': 'neck.lateral_convs.1.conv',
'module.backbone.fpn.fpn_inner4': 'neck.lateral_convs.2.conv',
'module.backbone.fpn.fpn_layer2': 'neck.fpn_convs.0.conv',
'module.backbone.fpn.fpn_layer3': 'neck.fpn_convs.1.conv',
'module.backbone.fpn.fpn_layer4': 'neck.fpn_convs.2.conv',
'module.backbone.fpn.top_blocks.p6': 'neck.fpn_convs.3.conv',
'module.backbone.fpn.top_blocks.p7': 'neck.fpn_convs.4.conv',
}
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
def convert(ckpt):
new_ckpt = OrderedDict()
for k, v in list(ckpt.items()):
if 'anchor_generator' in k or 'resizer' in k or 'cls_logits' in k:
continue
new_v = v
if 'module.backbone.body' in k:
new_k = k.replace('module.backbone.body', 'backbone')
if 'patch_embed.proj' in new_k:
new_k = new_k.replace('patch_embed.proj',
'patch_embed.projection')
elif 'pos_drop' in new_k:
new_k = new_k.replace('pos_drop', 'drop_after_pos')
if 'layers' in new_k:
new_k = new_k.replace('layers', 'stages')
if 'mlp.fc1' in new_k:
new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in new_k:
new_k = new_k.replace('mlp.fc2', 'ffn.layers.1')
elif 'attn' in new_k:
new_k = new_k.replace('attn', 'attn.w_msa')
if 'downsample' in k:
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
elif 'module.backbone.fpn' in k:
old_k = k.replace('.weight', '')
old_k = old_k.replace('.bias', '')
new_k = k.replace(old_k, convert_dict_fpn[old_k])
elif 'module.language_backbone' in k:
new_k = k.replace('module.language_backbone',
'language_model.language_backbone')
if 'pooler' in k:
continue
elif 'module.rpn' in k:
if 'module.rpn.head.scales' in k:
new_k = k.replace('module.rpn.head.scales',
'bbox_head.head.scales')
else:
new_k = k.replace('module.rpn', 'bbox_head')
if 'anchor_generator' in k and 'resizer' in k:
continue
else:
print('skip:', k)
continue
if 'DyConv' in new_k:
new_k = new_k.replace('DyConv', 'dyconvs')
if 'AttnConv' in new_k:
new_k = new_k.replace('AttnConv', 'attnconv')
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys in pretrained GLIP '
'models to mmdet style.')
parser.add_argument(
'src', default='glip_a_tiny_o365.pth', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument(
'--dst', default='glip_tiny_a_mmdet.pth', help='save path')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
weight = convert(state_dict)
torch.save(weight, args.dst)
sha = subprocess.check_output(['sha256sum', args.dst]).decode()
final_file = args.dst.replace('.pth', '') + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', args.dst, final_file])
print(f'Done!!, save to {final_file}')
if __name__ == '__main__':
main()
|
import logging
from typing import List
from backend.blocks.apollo._auth import ApolloCredentials
from backend.blocks.apollo.models import (
Contact,
Organization,
SearchOrganizationsRequest,
SearchOrganizationsResponse,
SearchPeopleRequest,
SearchPeopleResponse,
)
from backend.util.request import Requests
logger = logging.getLogger(name=__name__)
class ApolloClient:
"""Client for the Apollo API"""
API_URL = "https://api.apollo.io/api/v1"
def __init__(self, credentials: ApolloCredentials):
self.credentials = credentials
self.requests = Requests()
def _get_headers(self) -> dict[str, str]:
return {"x-api-key": self.credentials.api_key.get_secret_value()}
async def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
"""Search for people in Apollo"""
response = await self.requests.get(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchPeopleResponse(**data)
if parsed_response.pagination.total_entries == 0:
return []
people = parsed_response.people
# handle pagination
if (
query.max_results is not None
and query.max_results < parsed_response.pagination.total_entries
and len(people) < query.max_results
):
while (
len(people) < query.max_results
and query.page < parsed_response.pagination.total_pages
and len(parsed_response.people) > 0
):
query.page += 1
response = await self.requests.get(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchPeopleResponse(**data)
people.extend(parsed_response.people[: query.max_results - len(people)])
logger.info(f"Found {len(people)} people")
return people[: query.max_results] if query.max_results else people
async def search_organizations(
self, query: SearchOrganizationsRequest
) -> List[Organization]:
"""Search for organizations in Apollo"""
response = await self.requests.get(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchOrganizationsResponse(**data)
if parsed_response.pagination.total_entries == 0:
return []
organizations = parsed_response.organizations
# handle pagination
if (
query.max_results is not None
and query.max_results < parsed_response.pagination.total_entries
and len(organizations) < query.max_results
):
while (
len(organizations) < query.max_results
and query.page < parsed_response.pagination.total_pages
and len(parsed_response.organizations) > 0
):
query.page += 1
response = await self.requests.get(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
data = response.json()
parsed_response = SearchOrganizationsResponse(**data)
organizations.extend(
parsed_response.organizations[
: query.max_results - len(organizations)
]
)
logger.info(f"Found {len(organizations)} organizations")
return (
organizations[: query.max_results] if query.max_results else organizations
)
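# Hypothetical usage sketch (added for illustration, not part of the original
# module): pagination above is driven entirely by fields on the request model,
# so a caller only builds the query and awaits the client. The field names used
# here (`person_titles`, `max_results`) are illustrative assumptions, not the
# documented Apollo schema.
async def _search_people_demo(credentials: ApolloCredentials) -> None:
    client = ApolloClient(credentials)
    query = SearchPeopleRequest(person_titles=["engineer"], max_results=25)  # assumed fields
    for contact in await client.search_people(query):
        logger.info(f"Contact: {contact}")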
|
import logging
from typing import List
from backend.blocks.apollo._auth import ApolloCredentials
from backend.blocks.apollo.models import (
Contact,
Organization,
SearchOrganizationsRequest,
SearchOrganizationsResponse,
SearchPeopleRequest,
SearchPeopleResponse,
)
from backend.util.request import Requests
logger = logging.getLogger(name=__name__)
class ApolloClient:
"""Client for the Apollo API"""
API_URL = "https://api.apollo.io/api/v1"
def __init__(self, credentials: ApolloCredentials):
self.credentials = credentials
self.requests = Requests()
def _get_headers(self) -> dict[str, str]:
return {"x-api-key": self.credentials.api_key.get_secret_value()}
def search_people(self, query: SearchPeopleRequest) -> List[Contact]:
"""Search for people in Apollo"""
response = self.requests.get(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
parsed_response = SearchPeopleResponse(**response.json())
if parsed_response.pagination.total_entries == 0:
return []
people = parsed_response.people
# handle pagination
if (
query.max_results is not None
and query.max_results < parsed_response.pagination.total_entries
and len(people) < query.max_results
):
while (
len(people) < query.max_results
and query.page < parsed_response.pagination.total_pages
and len(parsed_response.people) > 0
):
query.page += 1
response = self.requests.get(
f"{self.API_URL}/mixed_people/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
parsed_response = SearchPeopleResponse(**response.json())
people.extend(parsed_response.people[: query.max_results - len(people)])
logger.info(f"Found {len(people)} people")
return people[: query.max_results] if query.max_results else people
def search_organizations(
self, query: SearchOrganizationsRequest
) -> List[Organization]:
"""Search for organizations in Apollo"""
response = self.requests.get(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
parsed_response = SearchOrganizationsResponse(**response.json())
if parsed_response.pagination.total_entries == 0:
return []
organizations = parsed_response.organizations
# handle pagination
if (
query.max_results is not None
and query.max_results < parsed_response.pagination.total_entries
and len(organizations) < query.max_results
):
while (
len(organizations) < query.max_results
and query.page < parsed_response.pagination.total_pages
and len(parsed_response.organizations) > 0
):
query.page += 1
response = self.requests.get(
f"{self.API_URL}/mixed_companies/search",
headers=self._get_headers(),
params=query.model_dump(exclude={"credentials", "max_results"}),
)
parsed_response = SearchOrganizationsResponse(**response.json())
organizations.extend(
parsed_response.organizations[
: query.max_results - len(organizations)
]
)
logger.info(f"Found {len(organizations)} organizations")
return (
organizations[: query.max_results] if query.max_results else organizations
)
|
"""This modules defines all kinds of exceptions raised in Jina."""
from typing import Set, Union
import grpc.aio
class BaseJinaException(BaseException):
"""A base class for all exceptions raised by Jina"""
class RuntimeFailToStart(SystemError, BaseJinaException):
"""When pod/deployment is failed to started."""
class RuntimeTerminated(KeyboardInterrupt, BaseJinaException):
"""The event loop of BasePod ends."""
class FlowTopologyError(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
class FlowMissingDeploymentError(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
class FlowBuildLevelError(Exception, BaseJinaException):
"""Flow exception when required build level is higher than the current build level."""
class BadConfigSource(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
class BadClient(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
class BadServer(Exception, BaseJinaException):
"""Error happens on the server side."""
class BadClientCallback(BadClient, BaseJinaException):
"""Error in the callback function on the client side."""
class BadClientInput(BadClient, BaseJinaException):
"""Error in the request generator function on the client side."""
class BadRequestType(TypeError, BaseJinaException):
"""Exception when can not construct a request object from given data."""
class BadImageNameError(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
class BadYAMLVersion(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
class NotSupportedError(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
class RuntimeRunForeverEarlyError(Exception, BaseJinaException):
"""Raised when an error occurs when starting the run_forever of Runtime"""
class DockerVersionError(SystemError, BaseJinaException):
"""Raised when the docker version is incompatible"""
class NoContainerizedError(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
class PortAlreadyUsed(RuntimeError, BaseJinaException):
"""Raised when to use a port which is already used"""
class InternalNetworkError(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
return self._details if self._details else self.og_exception.details()
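# Hypothetical usage sketch (added for illustration, not part of the original
# module): an AioRpcError caught at a network boundary can be re-raised as
# InternalNetworkError so the request id and destination address travel with it.
# The stub call below is an assumed placeholder, not a real Jina API.
async def _wrap_network_error_demo(stub, request, request_id: str):
    try:
        return await stub.Call(request)  # assumed gRPC stub method
    except grpc.aio.AioRpcError as err:
        raise InternalNetworkError(
            err, request_id=request_id, dest_addr={'executor0:8081'}
        )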
|
"""This modules defines all kinds of exceptions raised in Jina."""
from typing import Set, Union
import grpc.aio
class BaseJinaException(BaseException):
"""A base class for all exceptions raised by Jina"""
class RuntimeFailToStart(SystemError, BaseJinaException):
"""When pod/deployment is failed to started."""
class RuntimeTerminated(KeyboardInterrupt, BaseJinaException):
"""The event loop of BasePod ends."""
class FlowTopologyError(Exception, BaseJinaException):
"""Flow exception when the topology is ambiguous."""
class FlowMissingDeploymentError(Exception, BaseJinaException):
"""Flow exception when a deployment can not be found in the flow."""
class FlowBuildLevelError(Exception, BaseJinaException):
"""Flow exception when required build level is higher than the current build level."""
class BadConfigSource(FileNotFoundError, BaseJinaException):
"""The yaml config file is bad, not loadable or not exist."""
class BadClient(Exception, BaseJinaException):
"""A wrongly defined client, can not communicate with jina server correctly."""
class BadClientCallback(BadClient, BaseJinaException):
"""Error in the callback function on the client side."""
class BadClientInput(BadClient, BaseJinaException):
"""Error in the request generator function on the client side."""
class BadRequestType(TypeError, BaseJinaException):
"""Exception when can not construct a request object from given data."""
class BadImageNameError(Exception, BaseJinaException):
"""Exception when an image name can not be found either local & remote"""
class BadYAMLVersion(Exception, BaseJinaException):
"""Exception when YAML config specifies a wrong version number."""
class NotSupportedError(Exception, BaseJinaException):
"""Exception when user accidentally using a retired argument."""
class RuntimeRunForeverEarlyError(Exception, BaseJinaException):
"""Raised when an error occurs when starting the run_forever of Runtime"""
class DockerVersionError(SystemError, BaseJinaException):
"""Raised when the docker version is incompatible"""
class NoContainerizedError(Exception, BaseJinaException):
"""Raised when trying to use non-containerized Executor in K8s or Docker Compose"""
class PortAlreadyUsed(RuntimeError, BaseJinaException):
"""Raised when to use a port which is already used"""
class InternalNetworkError(grpc.aio.AioRpcError, BaseJinaException):
"""
Raised when communication between microservices fails.
Needed to propagate information about the root cause event, such as request_id and dest_addr.
"""
def __init__(
self,
og_exception: grpc.aio.AioRpcError,
request_id: str = '',
dest_addr: Union[str, Set[str]] = {''},
details: str = '',
):
"""
:param og_exception: the original exception that caused the network error
:param request_id: id of the request that caused the error
:param dest_addr: destination (microservice) address(es) of the problematic network call(s)
:param details: details of the error
"""
self.og_exception = og_exception
self.request_id = request_id
self.dest_addr = dest_addr
self._details = details
super().__init__(
og_exception.code(),
og_exception.initial_metadata(),
og_exception.trailing_metadata(),
self.details(),
og_exception.debug_error_string(),
)
def __str__(self):
return self.details()
def __repr__(self):
return self.__str__()
def code(self):
"""
:return: error code of this exception
"""
return self.og_exception.code()
def details(self):
"""
:return: details of this exception
"""
return self._details if self._details else self.og_exception.details()
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on="/index", inputs=data_generator(), request_size=request_size, return_results=True
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
return (
len(
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes("embedding")
)
== count
)
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on="/test", inputs=docs, parameters={"traversal_paths": [traversal_path]}, return_results=True
)
assert validate_traversal(docs_per_path)(resp)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from jinahub.encoder.transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on="/index", inputs=data_generator(), request_size=request_size, return_results=True
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
return (
len(
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes("embedding")
)
== count
)
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on="/test", inputs=docs, parameters={"traversal_paths": [traversal_path]}, return_results=True
)
assert validate_traversal(docs_per_path)(resp)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
import numpy as np
import torch
from mmdet.registry import DATASETS, TRANSFORMS
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
PIPELINES = TRANSFORMS
def _concat_dataset(cfg, default_args=None):
from .dataset_wrappers import ConcatDataset
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
separate_eval = cfg.get('separate_eval', True)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
# pop 'separate_eval' since it is not a valid key for common datasets.
if 'separate_eval' in data_cfg:
data_cfg.pop('separate_eval')
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval)
# TODO: Need to refactor later
def build_dataset(cfg, default_args=None):
from .dataset_wrappers import ClassBalancedDataset, MultiImageMixDataset
if cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif cfg['type'] == 'MultiImageMixDataset':
cp_cfg = copy.deepcopy(cfg)
cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
cp_cfg.pop('type')
dataset = MultiImageMixDataset(**cp_cfg)
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = DATASETS.build(cfg, default_args=default_args)
return dataset
def worker_init_fn(worker_id, num_workers, rank, seed):
# The seed of each worker equals to
# num_worker * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
import numpy as np
import torch
from mmdet.registry import DATASETS, TRANSFORMS
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
PIPELINES = TRANSFORMS
def _concat_dataset(cfg, default_args=None):
from .dataset_wrappers import ConcatDataset
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
separate_eval = cfg.get('separate_eval', True)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
# pop 'separate_eval' since it is not a valid key for common datasets.
if 'separate_eval' in data_cfg:
data_cfg.pop('separate_eval')
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval)
def build_dataset(cfg, default_args=None):
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
MultiImageMixDataset, RepeatDataset)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'ConcatDataset':
dataset = ConcatDataset(
[build_dataset(c, default_args) for c in cfg['datasets']],
cfg.get('separate_eval', True))
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif cfg['type'] == 'MultiImageMixDataset':
cp_cfg = copy.deepcopy(cfg)
cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
cp_cfg.pop('type')
dataset = MultiImageMixDataset(**cp_cfg)
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = DATASETS.build(cfg, default_args=default_args)
return dataset
def worker_init_fn(worker_id, num_workers, rank, seed):
# The seed of each worker equals to
# num_worker * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
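# Hypothetical example (added for illustration, not part of the original module):
# `build_dataset` recurses through wrapper configs, so a RepeatDataset wrapping a
# registered dataset type can be built from a single dict. The paths below are
# placeholder values, not real annotation files.
def _build_dataset_demo():
    cfg = dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(type='CocoDataset', ann_file='annotations.json', pipeline=[]),
    )
    return build_dataset(cfg)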
|
"""
This file runs Masked Language Model. You provide a training file. Each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
import gzip
import sys
from datetime import datetime
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
Trainer,
TrainingArguments,
)
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False # Set to True, if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, "rt", encoding="utf8") if train_path.endswith(".gz") else open(
train_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, "rt", encoding="utf8") if dev_path.endswith(".gz") else open(
dev_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on-the-fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
"""
This file runs Masked Language Model. You provide a training file. Each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
from transformers import AutoModelForMaskedLM, AutoTokenizer
from transformers import DataCollatorForLanguageModeling, DataCollatorForWholeWordMask
from transformers import Trainer, TrainingArguments
import sys
import gzip
from datetime import datetime
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False # Set to True, if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, 'rt', encoding='utf8') if train_path.endswith('.gz') else open(train_path, 'r', encoding='utf8') as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, 'rt', encoding='utf8') if dev_path.endswith('.gz') else open(dev_path, 'r', encoding='utf8') as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on-the-fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(self.sentences[item], add_special_tokens=True, truncation=True, max_length=self.max_length, return_special_tokens_mask=True)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(self.sentences[item], add_special_tokens=True, truncation=True, max_length=self.max_length, return_special_tokens_mask=True)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True) if len(dev_sentences) > 0 else None
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16
)
trainer = Trainer(
model=model,
args=training_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
from typing import Any, Mapping, Optional
from llama_index.readers.airbyte_cdk.base import AirbyteCDKReader, RecordHandler
class AirbyteTypeformReader(AirbyteCDKReader):
"""
AirbyteTypeformReader reader.
Retrieve documents from Typeform
Args:
config: The config object for the typeform source.
"""
def __init__(
self,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
import source_typeform
super().__init__(
source_class=source_typeform.SourceTypeform,
config=config,
record_handler=record_handler,
)
|
from typing import Any, Mapping, Optional
from llama_index.readers.airbyte_cdk.base import AirbyteCDKReader, RecordHandler
class AirbyteTypeformReader(AirbyteCDKReader):
"""AirbyteTypeformReader reader.
Retrieve documents from Typeform
Args:
config: The config object for the typeform source.
"""
def __init__(
self,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
import source_typeform
super().__init__(
source_class=source_typeform.SourceTypeform,
config=config,
record_handler=record_handler,
)
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
def test_barkspectrogram(self):
# replication_pad1d_backward_cuda is not deterministic and
# gives very small (~e-16) difference.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
|
import time
import unittest
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers.testing_utils import require_flash_attn, require_torch_gpu, slow
_TEST_PROMPTS = [
"A man is a walking his dog down the street, and a the turn he sees",
"Describe a fruit that is of orange color and round. It is a sweet fruit and a great source of Vitamine C. The fruit I'm thinking of is an",
"A plane is flying high in the sky, out of the window are clouds and mountains. Where could the plane be located?",
"Please fill in the form to",
"For safety reasons, the train is stopped in the middle of the",
]
_EXPECTED_OUTPUTS = [
"a woman standing on the sidewalk, looking at him. He is immediately drawn to her and feels a strong attraction. He walks up to her and strikes up a conversation, and they quickly discover that they have a lot in common. They exchange numbers and",
"orange.\n\n## Step 1: Identify the key characteristics of the fruit\nThe fruit is described as being orange in color and round in shape.\n\n## Step 2: Determine the taste and nutritional value of the fruit\nThe fruit is described as sweet",
"This riddle is a classic example of a lateral thinking puzzle, which requires the test-taker to think creatively and consider multiple possibilities. The answer is not a straightforward one, and it requires some lateral thinking to arrive at the correct solution.",
"get in touch with us. We will respond to your message as soon as possible.\n\n[Your Name]\n[Your Email]\n[Your Phone Number]\n[Your Message]\n\nWe are looking forward to hearing from you!\n\n[Insert Contact Information]\n\nNote:",
"track. The train is stopped for 30 minutes. The train is moving at a speed of 60 km/h. How many kilometers does the train travel in 30 minutes?\n## Step 1: Convert the speed from km/h to km/min",
]
@slow
@require_flash_attn
@require_torch_gpu
class TestBatchGeneration(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-3.2-3b-Instruct", torch_dtype="bfloat16", device_map="auto"
).eval()
cls.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3b-Instruct", padding_side="left")
if cls.tokenizer.pad_token is None:
cls.tokenizer.pad_token = cls.tokenizer.eos_token
cls.model.config.pad_token_id = cls.model.config.eos_token_id
cls.model.use_cache = False
@parameterized.expand(
[
("eager_paged", 64, 128, 64),
("sdpa_paged", 32, 256, 128),
("paged_attention", 16, 512, 256),
("flex_paged", 64, 128, 64),
]
)
def test_generate_batch_consistency(self, attn_impl, num_blocks, block_size, max_batch_tokens):
self.model.config.attn_implementation = attn_impl
generation_config = GenerationConfig(
max_new_tokens=50,
top_k=0,
eos_token_id=self.tokenizer.eos_token_id,
pad_token_id=self.tokenizer.pad_token_id,
use_cache=False,
num_blocks=num_blocks,
block_size=block_size,
max_batch_tokens=max_batch_tokens,
)
tokenized = self.tokenizer(_TEST_PROMPTS, truncation=True, max_length=512)
batch_inputs = list(tokenized["input_ids"])
start = time.time()
batch_outputs = self.model.generate_batch(
inputs=batch_inputs,
generation_config=generation_config,
)
end = time.time()
print(
f"\n[{attn_impl}] Batch took {end - start:.2f}s with config: blocks={num_blocks}, block_size={block_size}, max_batch_tokens={max_batch_tokens}"
)
for i, req_id in enumerate(batch_outputs):
generated = self.tokenizer.decode(batch_outputs[req_id].static_outputs, skip_special_tokens=False).strip()
expected = _EXPECTED_OUTPUTS[i].strip()
self.assertTrue(
generated.startswith(expected),
msg=f"[{attn_impl}] Mismatch in request {i}:\nExpected start: {expected}\nGot: {generated}",
)
|
import time
import unittest
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from transformers.testing_utils import require_flash_attn, require_torch_gpu, slow
_TEST_PROMPTS = [
"A man is a walking his dog down the street, and a the turn he sees",
"Describe a fruit that is of orange color and round. It is a sweet fruit and a great source of Vitamine C. The fruit I'm thinking of is an",
"A plane is flying high in the sky, out of the window are clouds and mountains. Where could the plane be located?",
"Please fill in the form to",
"For safety reasons, the train is stopped in the middle of the",
]
_EXPECTED_OUTPUTS = [
"a woman standing on the sidewalk, looking at him. He is immediately drawn to her and feels a strong attraction. He walks up to her and strikes up a conversation, and they quickly discover that they have a lot in common. They exchange numbers and",
"orange.\n\n## Step 1: Identify the key characteristics of the fruit\nThe fruit is described as being orange in color and round in shape.\n\n## Step 2: Determine the taste and nutritional value of the fruit\nThe fruit is described as sweet",
"This riddle is a classic example of a lateral thinking puzzle, which requires the test-taker to think creatively and consider multiple possibilities. The answer is not a straightforward one, and it requires some lateral thinking to arrive at the correct solution.",
"get in touch with us. We will respond to your message as soon as possible.\n\n[Your Name]\n[Your Email]\n[Your Phone Number]\n[Your Message]\n\nWe are looking forward to hearing from you!\n\n[Insert Contact Information]\n\nNote:",
"track. The train is stopped for 30 minutes. The train is moving at a speed of 60 km/h. How many kilometers does the train travel in 30 minutes?\n## Step 1: Convert the speed from km/h to km/min",
]
@slow
@require_torch_gpu
@require_flash_attn
class TestBatchGeneration(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-3.2-3b-Instruct", torch_dtype="bfloat16", device_map="auto"
).eval()
cls.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3b-Instruct", padding_side="left")
if cls.tokenizer.pad_token is None:
cls.tokenizer.pad_token = cls.tokenizer.eos_token
cls.model.config.pad_token_id = cls.model.config.eos_token_id
cls.model.use_cache = False
@parameterized.expand(
[
("eager_paged", 64, 128, 64),
("sdpa_paged", 32, 256, 128),
("paged_attention", 16, 512, 256),
("flex_paged", 64, 128, 64),
]
)
def test_generate_batch_consistency(self, attn_impl, num_blocks, block_size, max_batch_tokens):
self.model.config.attn_implementation = attn_impl
generation_config = GenerationConfig(
max_new_tokens=50,
top_k=0,
eos_token_id=self.tokenizer.eos_token_id,
pad_token_id=self.tokenizer.pad_token_id,
use_cache=False,
num_blocks=num_blocks,
block_size=block_size,
max_batch_tokens=max_batch_tokens,
)
tokenized = self.tokenizer(_TEST_PROMPTS, truncation=True, max_length=512)
batch_inputs = list(tokenized["input_ids"])
start = time.time()
batch_outputs = self.model.generate_batch(
inputs=batch_inputs,
generation_config=generation_config,
)
end = time.time()
print(
f"\n[{attn_impl}] Batch took {end - start:.2f}s with config: blocks={num_blocks}, block_size={block_size}, max_batch_tokens={max_batch_tokens}"
)
for i, req_id in enumerate(batch_outputs):
generated = self.tokenizer.decode(batch_outputs[req_id].static_outputs, skip_special_tokens=False).strip()
expected = _EXPECTED_OUTPUTS[i].strip()
self.assertTrue(
generated.startswith(expected),
msg=f"[{attn_impl}] Mismatch in request {i}:\nExpected start: {expected}\nGot: {generated}",
)
|
"""Test memory functionality."""
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_summary_buffer_memory_no_buffer_yet() -> None:
"""Test ConversationSummaryBufferMemory when no inputs put in buffer yet."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
output = memory.load_memory_variables({})
assert output == {"baz": ""}
async def test_summary_buffer_memory_no_buffer_yet_async() -> None:
"""Test ConversationSummaryBufferMemory when no inputs put in buffer yet."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
output = await memory.aload_memory_variables({})
assert output == {"baz": ""}
def test_summary_buffer_memory_buffer_only() -> None:
"""Test ConversationSummaryBufferMemory when only buffer."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
memory.save_context({"input": "bar"}, {"output": "foo"})
assert memory.buffer == "Human: bar\nAI: foo"
output = memory.load_memory_variables({})
assert output == {"baz": "Human: bar\nAI: foo"}
async def test_summary_buffer_memory_buffer_only_async() -> None:
"""Test ConversationSummaryBufferMemory when only buffer."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
await memory.asave_context({"input": "bar"}, {"output": "foo"})
assert memory.buffer == "Human: bar\nAI: foo"
output = await memory.aload_memory_variables({})
assert output == {"baz": "Human: bar\nAI: foo"}
def test_summary_buffer_memory_summary() -> None:
"""Test ConversationSummaryBufferMemory when only buffer."""
llm = FakeLLM(queries={0: "summary"}, sequential_responses=True)
memory = ConversationSummaryBufferMemory(
llm=llm,
memory_key="baz",
max_token_limit=5,
)
memory.save_context({"input": "bar"}, {"output": "foo"})
memory.save_context({"input": "bar1"}, {"output": "foo1"})
assert memory.buffer == "System: summary\nHuman: bar1\nAI: foo1"
output = memory.load_memory_variables({})
assert output == {"baz": "System: summary\nHuman: bar1\nAI: foo1"}
async def test_summary_buffer_memory_summary_async() -> None:
"""Test ConversationSummaryBufferMemory when only buffer."""
llm = FakeLLM(queries={0: "summary"}, sequential_responses=True)
memory = ConversationSummaryBufferMemory(
llm=llm,
memory_key="baz",
max_token_limit=5,
)
await memory.asave_context({"input": "bar"}, {"output": "foo"})
await memory.asave_context({"input": "bar1"}, {"output": "foo1"})
assert memory.buffer == "System: summary\nHuman: bar1\nAI: foo1"
output = await memory.aload_memory_variables({})
assert output == {"baz": "System: summary\nHuman: bar1\nAI: foo1"}
|
"""Test memory functionality."""
from langchain.memory.summary_buffer import ConversationSummaryBufferMemory
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_summary_buffer_memory_no_buffer_yet() -> None:
"""Test ConversationSummaryBufferMemory when no inputs put in buffer yet."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
output = memory.load_memory_variables({})
assert output == {"baz": ""}
async def test_summary_buffer_memory_no_buffer_yet_async() -> None:
"""Test ConversationSummaryBufferMemory when no inputs put in buffer yet."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
output = await memory.aload_memory_variables({})
assert output == {"baz": ""}
def test_summary_buffer_memory_buffer_only() -> None:
"""Test ConversationSummaryBufferMemory when only buffer."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
memory.save_context({"input": "bar"}, {"output": "foo"})
assert memory.buffer == "Human: bar\nAI: foo"
output = memory.load_memory_variables({})
assert output == {"baz": "Human: bar\nAI: foo"}
async def test_summary_buffer_memory_buffer_only_async() -> None:
"""Test ConversationSummaryBufferMemory when only buffer."""
memory = ConversationSummaryBufferMemory(llm=FakeLLM(), memory_key="baz")
await memory.asave_context({"input": "bar"}, {"output": "foo"})
assert memory.buffer == "Human: bar\nAI: foo"
output = await memory.aload_memory_variables({})
assert output == {"baz": "Human: bar\nAI: foo"}
def test_summary_buffer_memory_summary() -> None:
"""Test ConversationSummaryBufferMemory when only buffer."""
llm = FakeLLM(queries={0: "summary"}, sequential_responses=True)
memory = ConversationSummaryBufferMemory(
llm=llm, memory_key="baz", max_token_limit=5
)
memory.save_context({"input": "bar"}, {"output": "foo"})
memory.save_context({"input": "bar1"}, {"output": "foo1"})
assert memory.buffer == "System: summary\nHuman: bar1\nAI: foo1"
output = memory.load_memory_variables({})
assert output == {"baz": "System: summary\nHuman: bar1\nAI: foo1"}
async def test_summary_buffer_memory_summary_async() -> None:
"""Test ConversationSummaryBufferMemory when only buffer."""
llm = FakeLLM(queries={0: "summary"}, sequential_responses=True)
memory = ConversationSummaryBufferMemory(
llm=llm, memory_key="baz", max_token_limit=5
)
await memory.asave_context({"input": "bar"}, {"output": "foo"})
await memory.asave_context({"input": "bar1"}, {"output": "foo1"})
assert memory.buffer == "System: summary\nHuman: bar1\nAI: foo1"
output = await memory.aload_memory_variables({})
assert output == {"baz": "System: summary\nHuman: bar1\nAI: foo1"}
|
from keras.src import testing
from keras.src.datasets import california_housing
class CaliforniaHousingTest(testing.TestCase):
def test_load_data_large(self):
(x_train, y_train), (x_test, y_test) = california_housing.load_data(
version="large"
)
self.assertEqual(x_train.shape[1], 8)
# Ensure the dataset contains 20,640 samples as documented
self.assertEqual(x_train.shape[0] + x_test.shape[0], 20640)
def test_load_data_small(self):
(x_train, y_train), (x_test, y_test) = california_housing.load_data(
version="small"
)
self.assertEqual(x_train.shape[1], 8)
# Ensure the small dataset contains 600 samples as documented
self.assertEqual(x_train.shape[0] + x_test.shape[0], 600)
def test_invalid_version(self):
with self.assertRaises(ValueError):
california_housing.load_data(version="invalid_version")
def test_seed_reproducibility(self):
# Ensure the data is reproducible with the same seed
seed = 123
first_load = california_housing.load_data(version="large", seed=seed)
second_load = california_housing.load_data(version="large", seed=seed)
self.assertAllClose(first_load[0][0], second_load[0][0])
self.assertAllClose(first_load[1][0], second_load[1][0])
|
from keras.src import testing
from keras.src.datasets import california_housing
class CaliforniaHousingTest(testing.TestCase):
def test_load_data_large(self):
(x_train, y_train), (x_test, y_test) = california_housing.load_data(
version="large"
)
self.assertEqual(x_train.shape[1], 8)
# Ensure the dataset contains 20,640 samples as documented
self.assertEqual(x_train.shape[0] + x_test.shape[0], 20640)
def test_load_data_small(self):
(x_train, y_train), (x_test, y_test) = california_housing.load_data(
version="small"
)
self.assertEqual(x_train.shape[1], 8)
# Ensure the small dataset contains 600 samples as documented
self.assertEqual(x_train.shape[0] + x_test.shape[0], 600)
def test_invalid_version(self):
with self.assertRaises(ValueError):
california_housing.load_data(version="invalid_version")
def test_seed_reproducibility(self):
# Ensure the data is reproducible with the same seed
seed = 123
first_load = california_housing.load_data(version="large", seed=seed)
second_load = california_housing.load_data(version="large", seed=seed)
self.assertAllClose(first_load[0][0], second_load[0][0])
self.assertAllClose(first_load[1][0], second_load[1][0])
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class GaussianNoiseTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_noise_basics(self):
self.run_layer_test(
layers.GaussianNoise,
init_kwargs={
"stddev": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_gaussian_noise_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianNoise(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)), 0.3, atol=0.02
)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class GaussianNoiseTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_noise_basics(self):
self.run_layer_test(
layers.GaussianNoise,
init_kwargs={
"stddev": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
)
def test_gaussian_noise_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianNoise(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)), 0.3, atol=0.02
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.3.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.1.0'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(320, 320), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='PhotoMetricDistortion'),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(320, 320), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import os
import subprocess
from pathlib import Path
import click
from llama_dev.utils import find_all_packages, is_llama_index_package
@click.command(short_help="Exec a command inside a package folder")
@click.option(
"--fail-fast",
is_flag=True,
default=False,
help="Exit the command at the first failure",
)
@click.option(
"--all",
is_flag=True,
help="Get info for all the packages in the monorepo",
)
@click.argument("package_names", required=False, nargs=-1)
@click.option(
"--cmd",
required=True,
help="The command to execute (use quotes around the full command)",
)
@click.option(
"--silent",
is_flag=True,
default=False,
help="Only print errors",
)
@click.pass_obj
def cmd_exec(
obj: dict, all: bool, package_names: tuple, cmd: str, fail_fast: bool, silent: bool
):
if not all and not package_names:
raise click.UsageError("Either specify a package name or use the --all flag")
console = obj["console"]
packages: set[Path] = set()
# Do not use the virtual environment calling llama-dev, if any
env = os.environ.copy()
if "VIRTUAL_ENV" in env:
del env["VIRTUAL_ENV"]
if all:
packages = set(find_all_packages(obj["repo_root"]))
else:
for package_name in package_names:
package_path = obj["repo_root"] / package_name
if not is_llama_index_package(package_path):
raise click.UsageError(
f"{package_name} is not a path to a LlamaIndex package"
)
packages.add(package_path)
with console.status(f"[bold green]Running '{cmd}'...") as status:
for package in packages:
result = subprocess.run(
cmd.split(" "),
cwd=package,
text=True,
capture_output=True,
env=env,
)
if result.returncode != 0:
msg = f"Command '{cmd}' failed in {package.relative_to(obj['repo_root'])}: {result.stderr}"
if fail_fast:
raise click.ClickException(msg)
else:
console.print(msg, style="bold red")
else:
if not silent:
console.print(result.stdout)
console.log(
f"Command succeeded in {package.relative_to(obj['repo_root'])}"
)
|
import os
import subprocess
from pathlib import Path
import click
from llama_dev.utils import find_all_packages, is_llama_index_package
@click.command(short_help="Exec a command inside a package folder")
@click.option(
"--fail-fast",
is_flag=True,
default=False,
help="Exit the command at the first failure",
)
@click.option(
"--all",
is_flag=True,
help="Get info for all the packages in the monorepo",
)
@click.argument("package_names", required=False, nargs=-1)
@click.option(
"--cmd",
required=True,
help="The command to execute (use quotes around the full command)",
)
@click.pass_obj
def cmd_exec(obj: dict, all: bool, package_names: tuple, cmd: str, fail_fast: bool):
if not all and not package_names:
raise click.UsageError("Either specify a package name or use the --all flag")
console = obj["console"]
packages: set[Path] = set()
# Do not use the virtual environment calling llama-dev, if any
env = os.environ.copy()
if "VIRTUAL_ENV" in env:
del env["VIRTUAL_ENV"]
if all:
packages = set(find_all_packages(obj["repo_root"]))
else:
for package_name in package_names:
package_path = obj["repo_root"] / package_name
if not is_llama_index_package(package_path):
raise click.UsageError(
f"{package_name} is not a path to a LlamaIndex package"
)
packages.add(package_path)
with console.status(f"[bold green]Running '{cmd}'...") as status:
for package in packages:
result = subprocess.run(
cmd.split(" "), cwd=package, text=True, capture_output=True, env=env
)
if result.returncode != 0:
msg = f"Command '{cmd}' failed in {package}: {result.stderr}"
if fail_fast:
raise click.ClickException(msg)
else:
console.print(msg, style="bold red")
else:
console.print(result.stdout)
console.log(f"Command succeeded in {package}")
|
from typing import Any, Optional, Type, TypeVar, Union
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.utils._internal.pydantic import is_pydantic_v2
if is_pydantic_v2:
from pydantic import model_validator
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDoc):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation for 3D data and contains vertices and faces information.
Vertices are points in a 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
The Mesh3D Document can contain:
    - a [`Mesh3DUrl`][docarray.typing.url.Mesh3DUrl] (`Mesh3D.url`)
- a [`VerticesAndFaces`][docarray.documents.mesh.vertices_and_faces.VerticesAndFaces]
object containing:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of
vertices (`Mesh3D.tensors.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of faces (`Mesh3D.tensors.faces`)
- an [`AnyEmbedding`](../../../../api_references/typing/tensor/embedding) (`Mesh3D.embedding`)
- a `bytes` object (`Mesh3D.bytes_`).
You can use this Document directly:
```python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.tensors.vertices)
```
You can extend this Document:
```python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[str] = None
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.name = 'my first mesh'
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.vertices)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import Mesh3D, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
mesh: Mesh3D
text: TextDoc
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.mesh.tensors = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes_ = mmdoc.mesh.url.load_bytes()
```
You can display your 3D mesh in a notebook from either its url, or its tensors:
```python
from docarray.documents import Mesh3D
# display from url
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
# mesh.url.display()
# display from tensors
mesh.tensors = mesh.url.load()
# mesh.tensors.display()
```
"""
url: Optional[Mesh3DUrl] = Field(
description='URL to a file containing 3D mesh information. Can be remote (web) URL, or a local file path.',
example='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj',
default=None,
)
tensors: Optional[VerticesAndFaces] = Field(
description='A tensor object of 3D mesh of type `VerticesAndFaces`.',
example=[[0, 1, 1], [1, 0, 1], [1, 1, 0]],
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the 3D mesh.',
default=[1, 0, 1],
)
bytes_: Optional[bytes] = Field(
description='Bytes representation of 3D mesh.',
default=None,
)
if is_pydantic_v2:
@model_validator(mode='before')
@classmethod
def validate_model_before(cls, value):
if isinstance(value, str):
return {'url': value}
return value
else:
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
from docarray.utils._internal.pydantic import is_pydantic_v2
if is_pydantic_v2:
from pydantic import model_validator
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDoc):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation for 3D data and contains vertices and faces information.
Vertices are points in a 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
The Mesh3D Document can contain:
    - a [`Mesh3DUrl`][docarray.typing.url.Mesh3DUrl] (`Mesh3D.url`)
- a [`VerticesAndFaces`][docarray.documents.mesh.vertices_and_faces.VerticesAndFaces]
object containing:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of
vertices (`Mesh3D.tensors.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor) of faces (`Mesh3D.tensors.faces`)
- an [`AnyEmbedding`](../../../../api_references/typing/tensor/embedding) (`Mesh3D.embedding`)
- a `bytes` object (`Mesh3D.bytes_`).
You can use this Document directly:
```python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.tensors.vertices)
```
You can extend this Document:
```python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[str]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.name = 'my first mesh'
mesh.tensors = mesh.url.load()
# model = MyEmbeddingModel()
# mesh.embedding = model(mesh.vertices)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import Mesh3D, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
mesh: Mesh3D
text: TextDoc
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.mesh.tensors = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes_ = mmdoc.mesh.url.load_bytes()
```
You can display your 3D mesh in a notebook from either its url, or its tensors:
```python
from docarray.documents import Mesh3D
# display from url
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
# mesh.url.display()
# display from tensors
mesh.tensors = mesh.url.load()
# mesh.tensors.display()
```
"""
url: Optional[Mesh3DUrl] = Field(
description='URL to a file containing 3D mesh information. Can be remote (web) URL, or a local file path.',
example='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj',
default=None,
)
tensors: Optional[VerticesAndFaces] = Field(
description='A tensor object of 3D mesh of type `VerticesAndFaces`.',
example=[[0, 1, 1], [1, 0, 1], [1, 1, 0]],
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the 3D mesh.',
default=[1, 0, 1],
)
bytes_: Optional[bytes] = Field(
description='Bytes representation of 3D mesh.',
default=None,
)
if is_pydantic_v2:
@model_validator(mode='before')
@classmethod
def validate_model_before(cls, value):
if isinstance(value, str):
return {'url': value}
return value
else:
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset
from mmdet.utils import update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
        'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
        help='Evaluation metrics, which depend on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
        'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
        help='Evaluation metrics, which depend on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
|
import os
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, MetadataMode
dispatcher = get_dispatcher(__name__)
class CohereRerank(BaseNodePostprocessor):
model: str = Field(description="Cohere model name.")
top_n: int = Field(description="Top N nodes to return.")
base_url: Optional[str] = Field(description="Cohere base url.", default=None)
_client: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "rerank-english-v3.0",
api_key: Optional[str] = None,
base_url: Optional[str] = None,
):
super().__init__(top_n=top_n, model=model)
try:
api_key = api_key or os.environ["COHERE_API_KEY"]
        except KeyError:
raise ValueError(
"Must pass in cohere api key or "
"specify via COHERE_API_KEY environment variable "
)
try:
from cohere import Client
except ImportError:
raise ImportError(
"Cannot import cohere package, please `pip install cohere`."
)
self._client = Client(api_key=api_key, base_url=base_url)
@classmethod
def class_name(cls) -> str:
return "CohereRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle, nodes=nodes, top_n=self.top_n, model_name=self.model
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
results = self._client.rerank(
model=self.model,
top_n=self.top_n,
query=query_bundle.query_str,
documents=texts,
)
new_nodes = []
for result in results.results:
new_node_with_score = NodeWithScore(
node=nodes[result.index].node, score=result.relevance_score
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
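# Illustrative usage sketch (an assumption for clarity, not part of the original file;
# it presumes a valid COHERE_API_KEY, already-retrieved `nodes`, and `QueryBundle` from
# llama_index.core.schema):
#   reranker = CohereRerank(top_n=3, model="rerank-english-v3.0")
#   reranked = reranker.postprocess_nodes(nodes, query_bundle=QueryBundle("my query"))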
|
import os
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, MetadataMode
dispatcher = get_dispatcher(__name__)
class CohereRerank(BaseNodePostprocessor):
model: str = Field(description="Cohere model name.")
top_n: int = Field(description="Top N nodes to return.")
base_url: Optional[str] = Field(description="Cohere base url.", default=None)
_client: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "rerank-english-v2.0",
api_key: Optional[str] = None,
base_url: Optional[str] = None,
):
super().__init__(top_n=top_n, model=model)
try:
api_key = api_key or os.environ["COHERE_API_KEY"]
        except KeyError:
raise ValueError(
"Must pass in cohere api key or "
"specify via COHERE_API_KEY environment variable "
)
try:
from cohere import Client
except ImportError:
raise ImportError(
"Cannot import cohere package, please `pip install cohere`."
)
self._client = Client(api_key=api_key, base_url=base_url)
@classmethod
def class_name(cls) -> str:
return "CohereRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle, nodes=nodes, top_n=self.top_n, model_name=self.model
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
results = self._client.rerank(
model=self.model,
top_n=self.top_n,
query=query_bundle.query_str,
documents=texts,
)
new_nodes = []
for result in results.results:
new_node_with_score = NodeWithScore(
node=nodes[result.index].node, score=result.relevance_score
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, Transformer)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.7.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
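# Illustrative examples (not part of the original module):
#   parse_version_info('0.7.2')    -> (0, 7, 2)
#   parse_version_info('2.0.0rc1') -> (2, 0, 0, 'rc1')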
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.7.1'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from typing import Iterable, Iterator, Union, TYPE_CHECKING
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.milvus.backend import _batch_list, _always_true_expr
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other have the same Milvus Collections for data and offset2id
:param other: the other object to check for equality
:return: `True` if other is equal to self
"""
return (
type(self) is type(other)
and self._collection.name == other._collection.name
and self._offset2id_collection.name == other._offset2id_collection.name
and self._config == other._config
)
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, Document):
x = x.id
try:
self._get_doc_by_id(x)
return True
        except Exception:
return False
def __repr__(self):
return f'<DocumentArray[Milvus] (length={len(self)}) at {id(self)}>'
def __add__(self, other: Union['Document', Iterable['Document']]):
if isinstance(other, Document):
self.append(other)
else:
self.extend(other)
return self
def insert(self, index: int, value: 'Document', **kwargs):
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.insert(index, value.id)
def _append(self, value: 'Document', **kwargs):
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.append(value.id)
def _extend(self, values: Iterable['Document'], **kwargs):
docs = list(values)
if not docs:
return
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
for docs_batch in _batch_list(list(docs), kwargs['batch_size']):
payload = self._docs_to_milvus_payload(docs_batch)
self._collection.insert(payload, **kwargs)
self._offset2ids.extend([doc.id for doc in docs_batch])
def __len__(self):
if self._list_like:
return len(self._offset2ids)
else:
# Milvus has no native way to get num of entities
# so only use it as fallback option
with self.loaded_collection():
res = self._collection.query(
expr=_always_true_expr('document_id'),
output_fields=['document_id'],
)
return len(res)
|
from typing import Iterable, Iterator, Union, TYPE_CHECKING
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.milvus.backend import _batch_list
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other have the same Milvus Collections for data and offset2id
:param other: the other object to check for equality
:return: `True` if other is equal to self
"""
return (
type(self) is type(other)
and self._collection.name == other._collection.name
and self._offset2id_collection.name == other._offset2id_collection.name
and self._config == other._config
)
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, Document):
x = x.id
try:
self._get_doc_by_id(x)
return True
        except Exception:
return False
def __repr__(self):
return f'<DocumentArray[Milvus] (length={len(self)}) at {id(self)}>'
def __add__(self, other: Union['Document', Iterable['Document']]):
if isinstance(other, Document):
self.append(other)
else:
self.extend(other)
return self
def insert(self, index: int, value: 'Document', **kwargs):
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.insert(index, value.id)
def _append(self, value: 'Document', **kwargs):
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.append(value.id)
def _extend(self, values: Iterable['Document'], **kwargs):
docs = list(values)
if not docs:
return
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
for docs_batch in _batch_list(list(docs), kwargs['batch_size']):
payload = self._docs_to_milvus_payload(docs_batch)
self._collection.insert(payload, **kwargs)
self._offset2ids.extend([doc.id for doc in docs_batch])
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.device import (get_device, is_cuda_available, is_mlu_available,
is_mps_available)
def test_get_device():
device = get_device()
if is_cuda_available():
assert device == 'cuda'
elif is_mlu_available():
assert device == 'mlu'
elif is_mps_available():
assert device == 'mps'
else:
assert device == 'cpu'
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.device import get_device, is_cuda_available, is_mlu_available
def test_get_device():
device = get_device()
if is_cuda_available():
assert device == 'cuda'
elif is_mlu_available():
assert device == 'mlu'
else:
assert device == 'cpu'
|
from docarray.typing.bytes import ImageBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'TensorFlowTensor',
'NdArrayEmbedding',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
]
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
[
'AudioTorchTensor',
'TorchEmbedding',
'TorchTensor',
'VideoTorchTensor',
'ImageTorchTensor',
]
)
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor import TensorFlowTensor
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa
__all__.extend(
[
'TensorFlowTensor',
'TensorFlowEmbedding',
'AudioTensorFlowTensor',
'ImageTensorFlowTensor',
'VideoTensorFlowTensor',
]
)
|
from docarray.typing.bytes import ImageBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'TensorFlowTensor',
'NdArrayEmbedding',
'ImageBytes',
'ImageTensor',
'ImageNdArray',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
[
'AudioTorchTensor',
'TorchEmbedding',
'TorchTensor',
'VideoTorchTensor',
'ImageTorchTensor',
]
)
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
pass
else:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
__all__.extend(['TensorFlowTensor'])
|
# Copyright (c) OpenMMLab. All rights reserved.
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet'
]
|
from .csp_darknet import CSPDarknet
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .mobilenet_v2 import MobileNetV2
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .trident_resnet import TridentResNet
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet'
]
|
import pytest
from docarray import BaseDocument
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_bytes(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_bytes(protocol=protocol, compress=compress)
d2 = MyDoc.from_bytes(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_base64(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_base64(protocol=protocol, compress=compress)
d2 = MyDoc.from_base64(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
|
import pytest
from docarray import BaseDocument
from docarray.typing import NdArray
from docarray.documents import Image
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: Image
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_bytes(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=Image(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_bytes(protocol=protocol, compress=compress)
d2 = MyDoc.from_bytes(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_base64(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=Image(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_base64(protocol=protocol, compress=compress)
d2 = MyDoc.from_base64(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
|
from . import ( # noqa: F401
_extension,
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
from ._backend.common import AudioMetaData # noqa
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
def _is_backend_dispatcher_enabled():
import os
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER", default="1") == "1"
if _is_backend_dispatcher_enabled():
from ._backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
else:
from .backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
_init_backend()
# for backward compatibility. This has to happen after _backend is imported.
from . import backend # noqa: F401
__all__ = [
"AudioMetaData",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
from . import ( # noqa: F401
_extension,
compliance,
datasets,
functional,
io,
kaldi_io,
models,
pipelines,
sox_effects,
transforms,
utils,
)
from .backend.common import AudioMetaData
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
def _is_backend_dispatcher_enabled():
import os
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER", default="1") == "1"
if _is_backend_dispatcher_enabled():
from ._backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
else:
from .backend import _init_backend, get_audio_backend, list_audio_backends, set_audio_backend
_init_backend()
__all__ = [
"AudioMetaData",
"io",
"compliance",
"datasets",
"functional",
"models",
"pipelines",
"kaldi_io",
"utils",
"sox_effects",
"transforms",
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
from .iter_timer_hook import IterTimerHook
__all__ = ['Hook', 'IterTimerHook']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hook import Hook
__all__ = ['Hook']
|
from __future__ import annotations
from typing import Any
from langchain_core._api import deprecated
from langchain_core.caches import BaseCache as BaseCache # For model_rebuild
from langchain_core.callbacks import Callbacks as Callbacks # For model_rebuild
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.utils import pre_init
from pydantic import BaseModel
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Refer here for how to incorporate summaries of conversation history: "
"https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/"
),
)
class SummarizerMixin(BaseModel):
"""Mixin for summarizer."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
prompt: BasePromptTemplate = SUMMARY_PROMPT
summary_message_cls: type[BaseMessage] = SystemMessage
def predict_new_summary(
self, messages: list[BaseMessage], existing_summary: str
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return chain.predict(summary=existing_summary, new_lines=new_lines)
async def apredict_new_summary(
self, messages: list[BaseMessage], existing_summary: str
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return await chain.apredict(summary=existing_summary, new_lines=new_lines)
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
"""Continually summarizes the conversation history.
The summary is updated after each conversation turn.
    The implementation returns a summary of the conversation history which
can be used to provide context to the model.
"""
buffer: str = ""
memory_key: str = "history" #: :meta private:
@classmethod
def from_messages(
cls,
llm: BaseLanguageModel,
chat_memory: BaseChatMessageHistory,
*,
summarize_step: int = 2,
**kwargs: Any,
) -> ConversationSummaryMemory:
obj = cls(llm=llm, chat_memory=chat_memory, **kwargs)
for i in range(0, len(obj.chat_memory.messages), summarize_step):
obj.buffer = obj.predict_new_summary(
obj.chat_memory.messages[i : i + summarize_step], obj.buffer
)
return obj
@property
def memory_variables(self) -> list[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = [self.summary_message_cls(content=self.buffer)]
else:
buffer = self.buffer
return {self.memory_key: buffer}
@pre_init
def validate_prompt_input_variables(cls, values: dict) -> dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
msg = (
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
raise ValueError(msg)
return values
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.buffer = self.predict_new_summary(
self.chat_memory.messages[-2:], self.buffer
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.buffer = ""
ConversationSummaryMemory.model_rebuild()
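# Illustrative usage sketch (an assumption for clarity, not part of the original file;
# `llm` stands for any BaseLanguageModel instance):
#   memory = ConversationSummaryMemory(llm=llm, memory_key="history")
#   memory.save_context({"input": "hi"}, {"output": "hello"})
#   memory.load_memory_variables({})  # -> {"history": "<running summary>"}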
|
from __future__ import annotations
from typing import Any
from langchain_core._api import deprecated
from langchain_core.caches import BaseCache as BaseCache # For model_rebuild
from langchain_core.callbacks import Callbacks as Callbacks # For model_rebuild
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.utils import pre_init
from pydantic import BaseModel
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Refer here for how to incorporate summaries of conversation history: "
"https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/" # noqa: E501
),
)
class SummarizerMixin(BaseModel):
"""Mixin for summarizer."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
prompt: BasePromptTemplate = SUMMARY_PROMPT
summary_message_cls: type[BaseMessage] = SystemMessage
def predict_new_summary(
self, messages: list[BaseMessage], existing_summary: str
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return chain.predict(summary=existing_summary, new_lines=new_lines)
async def apredict_new_summary(
self, messages: list[BaseMessage], existing_summary: str
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return await chain.apredict(summary=existing_summary, new_lines=new_lines)
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
"""Continually summarizes the conversation history.
The summary is updated after each conversation turn.
    The implementation returns a summary of the conversation history which
can be used to provide context to the model.
"""
buffer: str = ""
memory_key: str = "history" #: :meta private:
@classmethod
def from_messages(
cls,
llm: BaseLanguageModel,
chat_memory: BaseChatMessageHistory,
*,
summarize_step: int = 2,
**kwargs: Any,
) -> ConversationSummaryMemory:
obj = cls(llm=llm, chat_memory=chat_memory, **kwargs)
for i in range(0, len(obj.chat_memory.messages), summarize_step):
obj.buffer = obj.predict_new_summary(
obj.chat_memory.messages[i : i + summarize_step], obj.buffer
)
return obj
@property
def memory_variables(self) -> list[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = [self.summary_message_cls(content=self.buffer)]
else:
buffer = self.buffer
return {self.memory_key: buffer}
@pre_init
def validate_prompt_input_variables(cls, values: dict) -> dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
msg = (
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
raise ValueError(msg)
return values
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.buffer = self.predict_new_summary(
self.chat_memory.messages[-2:], self.buffer
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.buffer = ""
ConversationSummaryMemory.model_rebuild()
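# --- Illustrative usage: a minimal sketch, assuming `FakeListLLM` is importable from
# langchain_core.language_models; the canned summary and the sample exchange below are
# made up for demonstration only. ---
if __name__ == "__main__":
    from langchain_core.language_models import FakeListLLM

    # Stub LLM that always answers with the same "summary".
    llm = FakeListLLM(responses=["The human greets the AI and asks what this memory does."])
    memory = ConversationSummaryMemory(llm=llm)

    # Each save_context() call folds the newest exchange into the running summary.
    memory.save_context(
        {"input": "Hi, what does this memory class do?"},
        {"output": "It keeps a rolling summary of the conversation."},
    )
    print(memory.load_memory_variables({}))  # {'history': '<current summary>'}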
|
from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info(f"Accuracy: {accuracy:.4f} ({correct}/{total})\n")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
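# --- Illustrative usage: a minimal sketch; the model name, toy examples and SoftmaxLoss
# wiring below are assumptions for demonstration (running it downloads the model). ---
#
# from torch.utils.data import DataLoader
# from sentence_transformers import InputExample, SentenceTransformer, losses
#
# model = SentenceTransformer("all-MiniLM-L6-v2")
# examples = [
#     InputExample(texts=["A plane is taking off.", "An air plane is taking off."], label=1),
#     InputExample(texts=["A man is playing a flute.", "A man is eating pasta."], label=0),
# ]
# dataloader = DataLoader(examples, shuffle=False, batch_size=2)
# softmax_loss = losses.SoftmaxLoss(
#     model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=2
# )
# evaluator = LabelAccuracyEvaluator(dataloader, name="dev", softmax_model=softmax_loss)
# metrics = evaluator(model, output_path=".")  # also writes accuracy_evaluation_dev_results.csv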
|
from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
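# --- Illustrative usage: a minimal sketch; this prototype transforms namespace is subject to
# change between releases, and the random image below is a stand-in for real data. ---
#
# import torch
#
# pipeline = Compose([
#     RandomHorizontalFlip(p=0.5),
#     Resize([224, 224]),
# ])
# image = torch.randint(0, 256, (3, 256, 256), dtype=torch.uint8)
# augmented = pipeline(image)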
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import SCNetRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestSCNetRoIHead(TestCase):
@parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init scnet RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
assert roi_head.with_semantic
assert roi_head.with_feat_relay
assert roi_head.with_glbctx
@parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
def test_scnet_roi_head_loss(self, cfg_file):
"""Tests htc roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
with_semantic=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
with_semantic=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
@parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
def test_scnet_roi_head_predict(self, cfg_file):
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
results = roi_head.predict(
feats, proposal_list, batch_data_samples, rescale=True)
self.assertEqual(results[0].masks.shape[-2:], (s, s))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import SCNetRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestSCNetRoIHead(TestCase):
@parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init scnet RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
assert roi_head.with_semantic
assert roi_head.with_feat_relay
assert roi_head.with_glbctx
@parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
def test_scnet_roi_head_loss(self, cfg_file):
"""Tests htc roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
with_semantic=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
with_semantic=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
@parameterized.expand(['scnet/scnet_r50_fpn_1x_coco.py'])
def test_scnet_roi_head_predict(self, cfg_file):
if not torch.cuda.is_available():
# RoI pooling only support in GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 256, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
results = roi_head.predict(
feats, proposal_list, batch_data_samples, rescale=True)
self.assertEqual(results[0].masks.shape[-2:], (s, s))
|
import asyncio
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, Optional, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
logger = logging.getLogger(__name__)
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}/{channel_key}"
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.info(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _get_pubsub_channel(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
full_channel_name = f"{self.event_bus_name}/{channel_key}"
pubsub = connection.pubsub()
return pubsub, full_channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, full_channel_name = self._serialize_message(event, channel_key)
self.connection.publish(full_channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
self.connection, channel_key
)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(full_channel_name)
else:
pubsub.subscribe(full_channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, full_channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(full_channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, full_channel_name = self._get_pubsub_channel(
await self.connection, channel_key
)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(full_channel_name)
else:
await pubsub.subscribe(full_channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
async def wait_for_event(
self, channel_key: str, timeout: Optional[float] = None
) -> M | None:
try:
return await asyncio.wait_for(
anext(aiter(self.listen_events(channel_key))), timeout
)
except TimeoutError:
return None
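# --- Illustrative usage: a minimal sketch; the event model and bus subclass below are
# hypothetical (not part of this module), and publishing requires a reachable Redis. ---
class _ExampleEvent(BaseModel):
    graph_id: str
    status: str


class _ExampleEventBus(RedisEventBus[_ExampleEvent]):
    Model = _ExampleEvent

    @property
    def event_bus_name(self) -> str:
        return "example_events"


# bus = _ExampleEventBus()
# bus.publish_event(_ExampleEvent(graph_id="g1", status="done"), channel_key="g1")
# for event in bus.listen_events("g1"):  # blocking; use AsyncRedisEventBus in async code
#     print(event.status)
#     break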
|
import json
import logging
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, AsyncGenerator, Generator, Generic, TypeVar
from pydantic import BaseModel
from redis.asyncio.client import PubSub as AsyncPubSub
from redis.client import PubSub
from backend.data import redis
logger = logging.getLogger(__name__)
class DateTimeEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
return super().default(o)
M = TypeVar("M", bound=BaseModel)
class BaseRedisEventBus(Generic[M], ABC):
Model: type[M]
@property
@abstractmethod
def event_bus_name(self) -> str:
pass
def _serialize_message(self, item: M, channel_key: str) -> tuple[str, str]:
message = json.dumps(item.model_dump(), cls=DateTimeEncoder)
channel_name = f"{self.event_bus_name}/{channel_key}"
logger.info(f"[{channel_name}] Publishing an event to Redis {message}")
return message, channel_name
def _deserialize_message(self, msg: Any, channel_key: str) -> M | None:
message_type = "pmessage" if "*" in channel_key else "message"
if msg["type"] != message_type:
return None
try:
data = json.loads(msg["data"])
logger.info(f"Consuming an event from Redis {data}")
return self.Model(**data)
except Exception as e:
logger.error(f"Failed to parse event result from Redis {msg} {e}")
def _subscribe(
self, connection: redis.Redis | redis.AsyncRedis, channel_key: str
) -> tuple[PubSub | AsyncPubSub, str]:
channel_name = f"{self.event_bus_name}/{channel_key}"
pubsub = connection.pubsub()
return pubsub, channel_name
class RedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
def connection(self) -> redis.Redis:
return redis.get_redis()
def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
self.connection.publish(channel_name, message)
def listen_events(self, channel_key: str) -> Generator[M, None, None]:
pubsub, channel_name = self._subscribe(self.connection, channel_key)
assert isinstance(pubsub, PubSub)
if "*" in channel_key:
pubsub.psubscribe(channel_name)
else:
pubsub.subscribe(channel_name)
for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
class AsyncRedisEventBus(BaseRedisEventBus[M], ABC):
Model: type[M]
@property
async def connection(self) -> redis.AsyncRedis:
return await redis.get_redis_async()
async def publish_event(self, event: M, channel_key: str):
message, channel_name = self._serialize_message(event, channel_key)
connection = await self.connection
await connection.publish(channel_name, message)
async def listen_events(self, channel_key: str) -> AsyncGenerator[M, None]:
pubsub, channel_name = self._subscribe(await self.connection, channel_key)
assert isinstance(pubsub, AsyncPubSub)
if "*" in channel_key:
await pubsub.psubscribe(channel_name)
else:
await pubsub.subscribe(channel_name)
async for message in pubsub.listen():
if event := self._deserialize_message(message, channel_key):
yield event
|
"""
This directory contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
"""
from __future__ import annotations
from .InputExample import InputExample
from .LabelSentenceReader import LabelSentenceReader
from .NLIDataReader import NLIDataReader
from .STSDataReader import STSBenchmarkDataReader, STSDataReader
from .TripletReader import TripletReader
__all__ = [
"InputExample",
"LabelSentenceReader",
"NLIDataReader",
"STSDataReader",
"STSBenchmarkDataReader",
"TripletReader",
]
|
from __future__ import annotations
from .InputExample import InputExample
from .LabelSentenceReader import LabelSentenceReader
from .NLIDataReader import NLIDataReader
from .STSDataReader import STSBenchmarkDataReader, STSDataReader
from .TripletReader import TripletReader
__all__ = [
"InputExample",
"LabelSentenceReader",
"NLIDataReader",
"STSDataReader",
"STSBenchmarkDataReader",
"TripletReader",
]
|
from typing import Optional
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
from typing import Optional
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.helper import _uri_to_blob
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
def load_to_bytes(self, timeout: Optional[float] = None) -> bytes:
"""
Load the text file into a bytes object.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt_bytes = doc.remote_url.load_to_bytes()
local_txt_bytes = doc.local_url.load_to_bytes()
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: the text file content as bytes
"""
return _uri_to_blob(self, timeout=timeout)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import TextUrl
class MyDoc(BaseDocument):
remote_url: TextUrl
local_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
local_url='home/username/my_file.txt',
)
remote_txt = doc.remote_url.load()
print(remote_txt)
# prints: ```<!DOCTYPE html>\n<html class="client-nojs" ... > ...```
local_txt = doc.local_url.load()
print(local_txt)
# prints content of my_file.txt
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = _uri_to_blob(self, timeout=timeout)
return _bytes.decode(charset)
|
from typing import Dict, Union
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
def forward(self, features: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: Union[str, bool] = True) -> Dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> "CLIPModel":
return CLIPModel(model_name=input_path)
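# --- Illustrative usage: a minimal sketch; wrapping the module in SentenceTransformer and the
# sample image path are assumptions for demonstration (running it downloads CLIP weights). ---
#
# from sentence_transformers import SentenceTransformer
# from PIL import Image
#
# model = SentenceTransformer(modules=[CLIPModel()])
# embeddings = model.encode(
#     [
#         Image.open("two_dogs_in_snow.jpg"),  # an image input
#         "Two dogs playing in the snow",      # a text input
#     ]
# )
# print(embeddings.shape)  # (2, 512) for the ViT-B/32 projection dimension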
|
from typing import Union
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None):
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self):
return "CLIPModel()"
def forward(self, features):
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: Union[str, bool] = True):
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self):
return self.processor
def save(self, output_path: str):
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str):
return CLIPModel(model_name=input_path)
|
"""Load Documents from a set of persistent Steamship Files."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SteamshipFileReader(BaseReader):
"""
Reads persistent Steamship Files and converts them to Documents.
Args:
api_key: Steamship API key. Defaults to STEAMSHIP_API_KEY value if not provided.
Note:
Requires installation of the `steamship` package and an active Steamship API Key.
To get a Steamship API Key, visit: https://steamship.com/account/api.
Once you have an API Key, expose it via an environment variable named
`STEAMSHIP_API_KEY` or pass it as an init argument (`api_key`).
"""
def __init__(self, api_key: Optional[str] = None) -> None:
"""Initialize the Reader."""
try:
import steamship # noqa
self.api_key = api_key
except ImportError:
raise ImportError(
"`steamship` must be installed to use the SteamshipFileReader.\n"
"Please run `pip install --upgrade steamship."
)
def load_data(
self,
workspace: str,
query: Optional[str] = None,
file_handles: Optional[List[str]] = None,
collapse_blocks: bool = True,
join_str: str = "\n\n",
) -> List[Document]:
"""
Load data from persistent Steamship Files into Documents.
Args:
workspace: the handle for a Steamship workspace
(see: https://docs.steamship.com/workspaces/index.html)
query: a Steamship tag query for retrieving files
(ex: 'filetag and value("import-id")="import-001"')
file_handles: a list of Steamship File handles
(ex: `smooth-valley-9kbdr`)
collapse_blocks: whether to merge individual File Blocks into a
single Document, or separate them.
join_str: when collapse_blocks is True, this is how the block texts
will be concatenated.
Note:
The collection of Files from both `query` and `file_handles` will be
combined. There is no (current) support for deconflicting the collections
(meaning that if a file appears both in the result set of the query and
as a handle in file_handles, it will be loaded twice).
"""
from steamship import File, Steamship
client = Steamship(workspace=workspace, api_key=self.api_key)
files = []
if query:
files_from_query = File.query(client=client, tag_filter_query=query).files
files.extend(files_from_query)
if file_handles:
files.extend([File.get(client=client, handle=h) for h in file_handles])
docs = []
for file in files:
metadata = {"source": file.handle}
for tag in file.tags:
metadata[tag.kind] = tag.value
if collapse_blocks:
text = join_str.join([b.text for b in file.blocks])
docs.append(Document(text=text, id_=file.handle, metadata=metadata))
else:
docs.extend(
[
Document(text=b.text, id_=file.handle, metadata=metadata)
for b in file.blocks
]
)
return docs
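# --- Illustrative usage: a minimal sketch; the workspace handle, tag query and file handle are
# placeholders, and a valid STEAMSHIP_API_KEY (or api_key argument) is required. ---
#
# reader = SteamshipFileReader()
# documents = reader.load_data(
#     workspace="my-workspace",
#     query='filetag and value("import-id")="import-001"',
#     file_handles=["smooth-valley-9kbdr"],
#     collapse_blocks=True,
# )
# print(len(documents))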
|
"""Load Documents from a set of persistent Steamship Files."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SteamshipFileReader(BaseReader):
"""Reads persistent Steamship Files and converts them to Documents.
Args:
api_key: Steamship API key. Defaults to STEAMSHIP_API_KEY value if not provided.
Note:
Requires installation of the `steamship` package and an active Steamship API Key.
To get a Steamship API Key, visit: https://steamship.com/account/api.
Once you have an API Key, expose it via an environment variable named
`STEAMSHIP_API_KEY` or pass it as an init argument (`api_key`).
"""
def __init__(self, api_key: Optional[str] = None) -> None:
"""Initialize the Reader."""
try:
import steamship # noqa
self.api_key = api_key
except ImportError:
raise ImportError(
"`steamship` must be installed to use the SteamshipFileReader.\n"
"Please run `pip install --upgrade steamship."
)
def load_data(
self,
workspace: str,
query: Optional[str] = None,
file_handles: Optional[List[str]] = None,
collapse_blocks: bool = True,
join_str: str = "\n\n",
) -> List[Document]:
"""Load data from persistent Steamship Files into Documents.
Args:
workspace: the handle for a Steamship workspace
(see: https://docs.steamship.com/workspaces/index.html)
query: a Steamship tag query for retrieving files
(ex: 'filetag and value("import-id")="import-001"')
file_handles: a list of Steamship File handles
(ex: `smooth-valley-9kbdr`)
collapse_blocks: whether to merge individual File Blocks into a
single Document, or separate them.
join_str: when collapse_blocks is True, this is how the block texts
will be concatenated.
Note:
The collection of Files from both `query` and `file_handles` will be
combined. There is no (current) support for deconflicting the collections
(meaning that if a file appears both in the result set of the query and
as a handle in file_handles, it will be loaded twice).
"""
from steamship import File, Steamship
client = Steamship(workspace=workspace, api_key=self.api_key)
files = []
if query:
files_from_query = File.query(client=client, tag_filter_query=query).files
files.extend(files_from_query)
if file_handles:
files.extend([File.get(client=client, handle=h) for h in file_handles])
docs = []
for file in files:
metadata = {"source": file.handle}
for tag in file.tags:
metadata[tag.kind] = tag.value
if collapse_blocks:
text = join_str.join([b.text for b in file.blocks])
docs.append(Document(text=text, id_=file.handle, metadata=metadata))
else:
docs.extend(
[
Document(text=b.text, id_=file.handle, metadata=metadata)
for b in file.blocks
]
)
return docs
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM).
2. Applies a sparse transformation using an activation function followed by log1p (i.e., log(1 + activation(MLM_logits))).
3. Applies a pooling strategy `max` or `sum` to produce sparse embeddings.
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): Pooling method across token dimensions.
Choices:
- `sum`: Sum pooling (used in the original SPLADE, see https://arxiv.org/pdf/2107.05720).
- `max`: Max pooling (used in SPLADEv2 and later models, see https://arxiv.org/pdf/2109.10086 or https://arxiv.org/pdf/2205.04733).
activation_function (str): Activation function applied before log1p transformation.
Choices:
- `relu`: ReLU activation (standard in all Splade models).
- `log1p_relu`: log(1 + ReLU(x)) variant used in OpenSearch Splade models (see arxiv.org/pdf/2504.14839).
word_embedding_dimension (int, optional): Dimensionality of the output embeddings (if needed).
"""
SPLADE_POOLING_MODES = ("sum", "max")
SPLADE_ACTIVATION = ["relu", "log1p_relu"]
def __init__(
self, pooling_strategy: str = "max", activation_function="relu", word_embedding_dimension: int = None
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.activation_function = activation_function
if activation_function not in self.SPLADE_ACTIVATION:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
self.config_keys = ["pooling_strategy", "activation_function", "word_embedding_dimension"]
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features with 'token_embeddings' key as MLM logits.
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
if self.activation_function == "relu":
splade_scores = torch.log1p(torch.relu(mlm_logits))
elif self.activation_function == "log1p_relu":
splade_scores = torch.log1p(torch.log1p(torch.relu(mlm_logits)))
else:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
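# --- Illustrative usage: a minimal sketch; the random tensor stands in for MLM-head logits and
# the vocabulary size below is made up for demonstration. ---
if __name__ == "__main__":
    pooling = SpladePooling(pooling_strategy="max", activation_function="relu")
    fake_mlm_logits = torch.randn(2, 10, 30522)  # (batch_size, seq_length, vocab_size)
    output = pooling({"token_embeddings": fake_mlm_logits})
    print(output["sentence_embedding"].shape)  # torch.Size([2, 30522])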
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM)
2. Applies a sparse transformation using the activation function, i.e. log(1 + activation(MLM_logits))
3. Applies a pooling strategy (max or sum) to produce sparse embeddings
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): The pooling strategy to use, either "max" or "sum".
"max" takes the maximum value across all tokens.
"sum" adds the values across all tokens.
activation_function (str): The activation function to use, either "relu" or "log1p_relu".
"relu" applies the ReLU activation function.
"log1p_relu" applies the log(1 + exp(x)) transformation.
"""
SPLADE_POOLING_MODES = ("sum", "max")
SPLADE_ACTIVATION = ["relu", "log1p_relu"]
def __init__(
self, pooling_strategy: str = "max", activation_function="relu", word_embedding_dimension: int = None
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.activation_function = activation_function
if activation_function not in self.SPLADE_ACTIVATION:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
self.config_keys = ["pooling_strategy", "activation_function", "word_embedding_dimension"]
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features with 'token_embeddings' key holding the MLM logits
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
if self.activation_function == "relu":
splade_scores = torch.log1p(torch.relu(mlm_logits))
elif self.activation_function == "log1p_relu":
splade_scores = torch.log1p(torch.log1p(torch.relu(mlm_logits)))
else:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'classes':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}})))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False,
backend_args={{_base_.backend_args}})
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
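# --- Illustrative usage: a minimal sketch; the config path and GPU count below are assumptions,
# adjust them to your local mmdetection checkout. ---
#   python tools/train.py configs/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712-cocofmt.py
#   bash tools/dist_train.sh configs/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712-cocofmt.py 8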
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
METAINFO = {
'classes':
('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
# palette is a list of color tuples, which is used for visualization.
'palette': [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),
(197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),
(153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),
(182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),
(0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
}
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
_delete_=True,
type=dataset_type,
data_root=data_root,
ann_file='annotations/voc0712_trainval.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/voc07_test.json',
data_prefix=dict(img=''),
metainfo=METAINFO,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/voc07_test.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# training schedule, the dataset is repeated 3 times, so the
# actual epoch = 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.ops.core import _saturate_cast
@keras_export("keras.layers.AutoContrast")
class AutoContrast(BaseImagePreprocessingLayer):
"""Performs the auto-contrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. An
example of this is if an image only has values `[0, 1]` out of the range
`[0, 255]`, auto contrast will change the `1` values to be `255`.
This layer is active at both training and inference time.
Args:
value_range: Range of values the incoming images will have.
Represented as a two number tuple written `(low, high)`.
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
Defaults to `(0, 255)`.
"""
_USE_BASE_FACTOR = False
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
value_range=(0, 255),
**kwargs,
):
super().__init__(**kwargs)
self._set_value_range(value_range)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def transform_images(self, images, transformation=None, training=True):
original_images = images
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.backend.cast(images, self.compute_dtype)
low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)
high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
results = self.backend.numpy.clip(images, 0.0, 255.0)
results = self._transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
results = self.backend.numpy.where(
self.backend.numpy.isnan(results), original_images, results
)
if results.dtype == images.dtype:
return results
if backend.is_int_dtype(images.dtype):
results = self.backend.numpy.round(results)
return _saturate_cast(results, images.dtype, self.backend)
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
def compute_output_shape(self, input_shape):
return input_shape
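# --- Illustrative usage: a minimal sketch; the random image batch is a stand-in for real data
# and only exercises the default (0, 255) value range. ---
if __name__ == "__main__":
    import numpy as np

    layer = AutoContrast(value_range=(0, 255))
    # Values only span [0, 128); auto-contrast stretches each image to the full range.
    images = np.random.randint(0, 128, size=(2, 16, 16, 3)).astype("float32")
    stretched = layer(images)
    print(stretched.shape)  # (2, 16, 16, 3)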
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.ops.core import _saturate_cast
@keras_export("keras.layers.AutoContrast")
class AutoContrast(BaseImagePreprocessingLayer):
"""Performs the auto-contrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. An
example of this is if an image only has values `[0, 1]` out of the range
`[0, 255]`, auto contrast will change the `1` values to be `255`.
This layer is active at both training and inference time.
Args:
value_range: Range of values the incoming images will have.
Represented as a two number tuple written `(low, high)`.
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
Defaults to `(0, 255)`.
"""
_USE_BASE_FACTOR = False
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
value_range=(0, 255),
**kwargs,
):
super().__init__(**kwargs)
self._set_value_range(value_range)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def transform_images(self, images, transformation=None, training=True):
original_images = images
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.backend.cast(images, self.compute_dtype)
low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)
high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
results = self.backend.numpy.clip(images, 0.0, 255.0)
results = self._transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
results = self.backend.numpy.where(
self.backend.numpy.isnan(results), original_images, results
)
if results.dtype == images.dtype:
return results
if backend.is_int_dtype(images.dtype):
results = self.backend.numpy.round(results)
return _saturate_cast(results, images.dtype, self.backend)
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
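# Hedged usage sketch, assuming a standard Keras 3 install so that the
# AutoContrast layer defined above can be instantiated directly.
import numpy as np
layer = AutoContrast(value_range=(0, 255))
# A low-contrast batch: pixel values confined to [64, 128] out of [0, 255].
images = np.random.uniform(64, 128, size=(2, 8, 8, 3)).astype("float32")
stretched = layer(images)
print(stretched.shape)  # (2, 8, 8, 3); values are stretched towards [0, 255]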
|
import multiprocessing
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.parsers.helper import _update_gateway_args
if TYPE_CHECKING: # pragma: no cover
from argparse import Namespace
def _get_event(obj) -> multiprocessing.Event:
if isinstance(obj, multiprocessing.Process) or isinstance(
obj, multiprocessing.context.ForkProcess
):
return multiprocessing.Event()
elif isinstance(obj, multiprocessing.context.SpawnProcess):
return multiprocessing.get_context('spawn').Event()
else:
raise TypeError(f'{obj} is not an instance of "multiprocessing.Process"')
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
that gets triggered when any of the events provided in input is triggered (OR logic)
:param events_list: The list of events that compose this composable event
"""
def __init__(self, events_list):
super().__init__()
self.event = None
self.event = multiprocessing.synchronize.Event(
ctx=multiprocessing.get_context()
)
self.event_list = events_list
for e in events_list:
self._setup(e, self._state_changed)
self._state_changed()
def _state_changed(self):
bools = [e.is_set() for e in self.event_list]
if any(bools):
self.event.set()
else:
self.event.clear()
def _custom_set(self, e):
e._set()
e._state_changed()
def _custom_clear(self, e):
e._clear()
e._state_changed()
def _setup(self, e, changed_callback):
e._set = e.set
e._clear = e.clear
e._state_changed = changed_callback
e.set = partial(self._custom_set, e)
e.clear = partial(self._custom_clear, e)
def update_runtime_cls(args) -> 'Namespace':
"""Get runtime_cls as a string from args
:param args: pod/deployment namespace args
:return: runtime class as a string
"""
_args = args
if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
_hub_args = deepcopy(_args)
_hub_args.uri = _args.uses
_hub_args.no_usage = True
_args.uses = HubIO(_hub_args).pull()
if hasattr(_args, 'protocol') and _args.pod_role == PodRoleType.GATEWAY:
_update_gateway_args(_args)
if _args.pod_role == PodRoleType.HEAD:
_args.runtime_cls = 'HeadRuntime'
return _args
|
import multiprocessing
import re
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.parsers.helper import _update_gateway_args
if TYPE_CHECKING: # pragma: no cover
from argparse import Namespace
def _get_event(obj) -> multiprocessing.Event:
if isinstance(obj, multiprocessing.Process) or isinstance(
obj, multiprocessing.context.ForkProcess
):
return multiprocessing.Event()
elif isinstance(obj, multiprocessing.context.SpawnProcess):
return multiprocessing.get_context('spawn').Event()
else:
raise TypeError(f'{obj} is not an instance of "multiprocessing.Process"')
class ConditionalEvent:
"""
:class:`ConditionalEvent` provides a common interface to an event (multiprocessing or threading event)
that gets triggered when any of the events provided in input is triggered (OR logic)
:param events_list: The list of events that compose this composable event
"""
def __init__(self, events_list):
super().__init__()
self.event = None
self.event = multiprocessing.synchronize.Event(
ctx=multiprocessing.get_context()
)
self.event_list = events_list
for e in events_list:
self._setup(e, self._state_changed)
self._state_changed()
def _state_changed(self):
bools = [e.is_set() for e in self.event_list]
if any(bools):
self.event.set()
else:
self.event.clear()
def _custom_set(self, e):
e._set()
e._state_changed()
def _custom_clear(self, e):
e._clear()
e._state_changed()
def _setup(self, e, changed_callback):
e._set = e.set
e._clear = e.clear
e._state_changed = changed_callback
e.set = partial(self._custom_set, e)
e.clear = partial(self._custom_clear, e)
def update_runtime_cls(args) -> 'Namespace':
"""Get runtime_cls as a string from args
:param args: pod/deployment namespace args
:return: runtime class as a string
"""
_args = args
if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
_hub_args = deepcopy(_args)
_hub_args.uri = _args.uses
_hub_args.no_usage = True
_args.uses = HubIO(_hub_args).pull()
if hasattr(_args, 'protocol') and _args.pod_role == PodRoleType.GATEWAY:
_update_gateway_args(_args)
if _args.pod_role == PodRoleType.HEAD:
_args.runtime_cls = 'HeadRuntime'
return _args
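# Hedged usage sketch of the OR semantics of ConditionalEvent, composing two
# plain multiprocessing events (multiprocessing is imported above).
e1, e2 = multiprocessing.Event(), multiprocessing.Event()
combined = ConditionalEvent([e1, e2])
assert not combined.event.is_set()
e1.set()  # setting any member event ...
assert combined.event.is_set()  # ... sets the composed event
e1.clear()
assert not combined.event.is_set()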
|
PREFIX = """Answer the following questions as best you can. You have access to the following tools:""" # noqa: E501
FORMAT_INSTRUCTIONS = """Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
SUFFIX = """Begin!
Question: {input}
Thought:{agent_scratchpad}"""
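# Hedged illustration of how the three prompt pieces are typically assembled;
# the tool list and the filled-in values below are made-up examples.
tools_block = "search: look things up\ncalculator: do arithmetic"
template = "\n\n".join([PREFIX, tools_block, FORMAT_INSTRUCTIONS, SUFFIX])
prompt = template.format(tool_names="search, calculator", input="What is 2 + 2?", agent_scratchpad="")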
|
# flake8: noqa
PREFIX = """Answer the following questions as best you can. You have access to the following tools:"""
FORMAT_INSTRUCTIONS = """Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
SUFFIX = """Begin!
Question: {input}
Thought:{agent_scratchpad}"""
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
"""tokenizing and encoding/decoding text using SentencePiece."""
def __init__(self, model_path: str):
"""
Initializes the Tokenizer with a SentencePiece model.
Args:
model_path (str): The path to the SentencePiece model file.
"""
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
"""
Encodes a string into a list of token IDs.
Args:
s (str): The input string to be encoded.
bos (bool): Whether to prepend the beginning-of-sequence token.
eos (bool): Whether to append the end-of-sequence token.
Returns:
List[int]: A list of token IDs.
"""
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
"""
Decodes a list of token IDs into a string.
Args:
t (List[int]): The list of token IDs to be decoded.
Returns:
str: The decoded string.
"""
return self.sp_model.decode(t)
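# Hedged usage sketch; "tokenizer.model" below is a placeholder path to a
# SentencePiece model file, not a shipped asset.
tok = Tokenizer(model_path="tokenizer.model")
ids = tok.encode("hello world", bos=True, eos=False)  # [tok.bos_id, ...]
text = tok.decode(ids)  # SentencePiece drops special ids such as BOS on decode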
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
|
_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
from typing import Optional
import pytest
from langchain_cli.constants import (
DEFAULT_GIT_REF,
DEFAULT_GIT_REPO,
DEFAULT_GIT_SUBDIRECTORY,
)
from langchain_cli.utils.git import DependencySource, parse_dependency_string
def _assert_dependency_equals(
dep: DependencySource,
*,
git: Optional[str] = None,
ref: Optional[str] = None,
subdirectory: Optional[str] = None,
event_metadata: Optional[dict] = None,
) -> None:
if dep["git"] != git:
msg = f"Expected git to be {git} but got {dep['git']}"
raise ValueError(msg)
if dep["ref"] != ref:
msg = f"Expected ref to be {ref} but got {dep['ref']}"
raise ValueError(msg)
if dep["subdirectory"] != subdirectory:
msg = (
f"Expected subdirectory to be {subdirectory} but got {dep['subdirectory']}"
)
raise ValueError(msg)
if dep["subdirectory"] != subdirectory:
msg = (
f"Expected subdirectory to be {subdirectory} but got {dep['subdirectory']}"
)
raise ValueError(msg)
if event_metadata is not None and dep["event_metadata"] != event_metadata:
msg = (
f"Expected event_metadata to be {event_metadata} "
f"but got {dep['event_metadata']}"
)
raise ValueError(msg)
def test_dependency_string() -> None:
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com/efriis/myrepo.git",
None,
None,
None,
),
git="ssh://git@github.com/efriis/myrepo.git",
ref=None,
subdirectory=None,
)
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref=None,
)
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com:efriis/myrepo.git#develop",
None,
None,
None,
),
git="ssh://git@github.com:efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
# also support a slash in ssh
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com/efriis/myrepo.git#develop",
None,
None,
None,
),
git="ssh://git@github.com/efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
# looks like poetry supports both an @ and a #
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com:efriis/myrepo.git@develop",
None,
None,
None,
),
git="ssh://git@github.com:efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
_assert_dependency_equals(
parse_dependency_string("simple-pirate", None, None, None),
git=DEFAULT_GIT_REPO,
subdirectory=f"{DEFAULT_GIT_SUBDIRECTORY}/simple-pirate",
ref=DEFAULT_GIT_REF,
)
def test_dependency_string_both() -> None:
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@branch#subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref="branch",
)
def test_dependency_string_invalids() -> None:
# expect error for wrong order
# Bypassing validation since the ValueError message is dynamic
with pytest.raises(ValueError): # noqa: PT011
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src@branch",
None,
None,
None,
)
# expect error for @subdirectory
def test_dependency_string_edge_case() -> None:
# weird unsolvable edge case of
# git+ssh://a@b
# this could be a ssh dep with user=a, and default ref
# or a ssh dep at a with ref=b.
# in this case, assume the first case (be greedy with the '@')
_assert_dependency_equals(
parse_dependency_string("git+ssh://a@b", None, None, None),
git="ssh://a@b",
subdirectory=None,
ref=None,
)
# weird one that is actually valid
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory=None,
ref="subdirectory=src",
)
|
from typing import Optional
import pytest
from langchain_cli.constants import (
DEFAULT_GIT_REF,
DEFAULT_GIT_REPO,
DEFAULT_GIT_SUBDIRECTORY,
)
from langchain_cli.utils.git import DependencySource, parse_dependency_string
def _assert_dependency_equals(
dep: DependencySource,
*,
git: Optional[str] = None,
ref: Optional[str] = None,
subdirectory: Optional[str] = None,
event_metadata: Optional[dict] = None,
) -> None:
assert dep["git"] == git
assert dep["ref"] == ref
assert dep["subdirectory"] == subdirectory
if event_metadata is not None:
assert dep["event_metadata"] == event_metadata
def test_dependency_string() -> None:
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com/efriis/myrepo.git", None, None, None
),
git="ssh://git@github.com/efriis/myrepo.git",
ref=None,
subdirectory=None,
)
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref=None,
)
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com:efriis/myrepo.git#develop", None, None, None
),
git="ssh://git@github.com:efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
# also support a slash in ssh
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com/efriis/myrepo.git#develop", None, None, None
),
git="ssh://git@github.com/efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
# looks like poetry supports both an @ and a #
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com:efriis/myrepo.git@develop", None, None, None
),
git="ssh://git@github.com:efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
_assert_dependency_equals(
parse_dependency_string("simple-pirate", None, None, None),
git=DEFAULT_GIT_REPO,
subdirectory=f"{DEFAULT_GIT_SUBDIRECTORY}/simple-pirate",
ref=DEFAULT_GIT_REF,
)
def test_dependency_string_both() -> None:
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@branch#subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref="branch",
)
def test_dependency_string_invalids() -> None:
# expect error for wrong order
with pytest.raises(ValueError):
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src@branch",
None,
None,
None,
)
# expect error for @subdirectory
def test_dependency_string_edge_case() -> None:
# weird unsolvable edge case of
# git+ssh://a@b
# this could be a ssh dep with user=a, and default ref
# or a ssh dep at a with ref=b.
# in this case, assume the first case (be greedy with the '@')
_assert_dependency_equals(
parse_dependency_string("git+ssh://a@b", None, None, None),
git="ssh://a@b",
subdirectory=None,
ref=None,
)
# weird one that is actually valid
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory=None,
ref="subdirectory=src",
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, List, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by Spacy
:param model_name: pre-trained spaCy language pipeline name
:param require_gpu: if set to True, spaCy is required to run on GPU via
``spacy.require_gpu()``; otherwise the CPU is used
:param download_data: whether to download the spaCy model on start-up
:param default_batch_size: Default batch size, used if ``batch_size`` is not
provided as a parameter in the request
:param default_traversal_paths: Default traversal paths, used if ``traversal_paths``
are not provided as a parameter in the request.
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
require_gpu: bool = False,
download_data: bool = True,
default_batch_size: int = 32,
default_traversal_paths: List[str] = ['r'],
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
if require_gpu:
spacy.require_gpu()
if download_data:
subprocess.run(
['python', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if docs:
batch_size = parameters.get('batch_size', self.default_batch_size)
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=batch_size,
needs_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
doc.embedding = spacy_doc.vector
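# Hedged usage sketch, assuming jina and jina_commons are installed and the
# `en_core_web_sm` model is already available, so the download step is skipped.
from jina import Document
encoder = SpacyTextEncoder(model_name='en_core_web_sm', download_data=False)
docs = DocumentArray([Document(text='hello world')])
encoder.encode(docs=docs, parameters={})
print(docs[0].embedding.shape)  # vector size depends on the spaCy model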
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List, Dict, Optional
import numpy as np
import torch
import spacy
from jina import Executor, DocumentArray, requests
from jina.logging.logger import JinaLogger
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by Spacy
:param lang: name of the pre-trained spaCy language pipeline, `en_core_web_sm` by default
(its default tok2vec component uses HashEmbedCNN). Models such as `en_core_web_md`,
`en_core_web_lg` and `en_core_web_trf` are also supported; refer to https://spacy.io/models/en.
:param use_default_encoder: if True will use parser component,
otherwise tok2vec implementation will be chosen,
by default False.
:param default_traversal_paths: fallback traversal paths used when no traversal path is sent in the request
:param device: device to use for encoding ['cuda', 'cpu'] - if not set, the device is detected automatically
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
SPACY_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'tok2vec',
'lemmatizer',
'attribute_ruler',
]
def __init__(self,
lang: str = 'en_core_web_sm',
use_default_encoder: bool = False,
default_traversal_paths: List[str] = ['r'],
device: Optional[str] = None,
*args, **kwargs):
"""Set constructor."""
super().__init__(*args, **kwargs)
self.lang = lang
self.use_default_encoder = use_default_encoder
self.default_traversal_paths = default_traversal_paths
self.logger = JinaLogger(self.__class__.__name__)
if not device:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
if self.device == 'cuda':
spacy.require_gpu()
try:
self.spacy_model = spacy.load(self.lang)
# Disable everything as we only require certain pipelines to be turned on.
ignored_components = []
for comp in self.SPACY_COMPONENTS:
try:
self.spacy_model.disable_pipe(comp)
except Exception:
ignored_components.append(comp)
self.logger.info(f'Ignoring {ignored_components} pipelines as they are not available in the model package.')
except IOError:
self.logger.error(
f'spaCy model for language {self.lang} can not be found. Please install by referring to the '
'official page https://spacy.io/usage/models.'
)
raise
if self.use_default_encoder:
try:
self.spacy_model.enable_pipe('parser')
except ValueError:
self.logger.error(
f'Parser for language {self.lang} cannot be found. The default sentence encoder requires '
'DependencyParser to be trained. Please refer to https://spacy.io/api/tok2vec for more clarity.'
)
raise
else:
try:
self.spacy_model.enable_pipe('tok2vec')
except ValueError:
self.logger.error(
f'TokenToVector is not available for language {self.lang}. Please refer to '
'https://github.com/explosion/spaCy/issues/6615 for training your own recognizer.'
)
raise
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding attribute of the docs.
:param docs: documents sent to the encoder. The docs must have `text` as content
:param parameters: dictionary to define the `traversal_paths`.
For example, `parameters={'traversal_paths': ['r']}`
will set the traversal paths that are actually used
"""
if docs:
trav_paths = parameters.get('traversal_paths', self.default_traversal_paths)
# traverse through all documents that have to be processed
flat_docs = docs.traverse_flat(trav_paths)
# filter out documents without text
filtered_docs = [doc for doc in flat_docs if doc.text is not None]
for doc in filtered_docs:
spacy_doc = self.spacy_model(doc.text)
doc.embedding = spacy_doc.vector
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(parallel=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
def test_grpc_custom_options():
f = Flow(grpc_server_options={'grpc.max_send_message_length': -1})
with f:
pass
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Client, Document, Flow
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'http'])
def test_gateway_concurrency(protocol, reraise):
port = 12345
CONCURRENCY = 2
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(status_codes, durations, index):
with reraise:
start = time.time()
on_done = functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
)
results = Client(port=port, protocol=protocol).index(
inputs=(Document() for _ in range(256)), _size=16, return_responses=True
)
assert len(results) > 0
for result in results:
on_done(result)
f = Flow(protocol=protocol, port=port).add(parallel=2)
with f:
threads = []
status_codes = [None] * CONCURRENCY
durations = [None] * CONCURRENCY
for i in range(CONCURRENCY):
t = Thread(target=_request, args=(status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
|
import importlib.util
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, model_validator
class SpacyEmbeddings(BaseModel, Embeddings):
"""Embeddings by spaCy models.
Attributes:
model_name (str): Name of a spaCy model.
nlp (Any): The spaCy model loaded into memory.
Methods:
embed_documents(texts: List[str]) -> List[List[float]]:
Generates embeddings for a list of documents.
embed_query(text: str) -> List[float]:
Generates an embedding for a single piece of text.
"""
model_name: str = "en_core_web_sm"
nlp: Optional[Any] = None
model_config = ConfigDict(extra="forbid", protected_namespaces=())
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""
Validates that the spaCy package and the model are installed.
Args:
values (Dict): The values provided to the class constructor.
Returns:
The validated values.
Raises:
ValueError: If the spaCy package or the
model are not installed.
"""
if values.get("model_name") is None:
values["model_name"] = "en_core_web_sm"
model_name = values.get("model_name")
# Check if the spaCy package is installed
if importlib.util.find_spec("spacy") is None:
raise ValueError(
"SpaCy package not found. Please install it with `pip install spacy`."
)
try:
# Try to load the spaCy model
import spacy
values["nlp"] = spacy.load(model_name)
except OSError:
# If the model is not found, raise a ValueError
raise ValueError(
f"SpaCy model '{model_name}' not found. "
f"Please install it with"
f" `python -m spacy download {model_name}`"
"or provide a valid spaCy model name."
)
return values # Return the validated values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generates embeddings for a list of documents.
Args:
texts (List[str]): The documents to generate embeddings for.
Returns:
A list of embeddings, one for each document.
"""
return [self.nlp(text).vector.tolist() for text in texts] # type: ignore[misc]
def embed_query(self, text: str) -> List[float]:
"""
Generates an embedding for a single piece of text.
Args:
text (str): The text to generate an embedding for.
Returns:
The embedding for the text.
"""
return self.nlp(text).vector.tolist() # type: ignore[misc]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Asynchronously generates embeddings for a list of documents.
This method is not implemented and raises a NotImplementedError.
Args:
texts (List[str]): The documents to generate embeddings for.
Raises:
NotImplementedError: This method is not implemented.
"""
raise NotImplementedError("Asynchronous embedding generation is not supported.")
async def aembed_query(self, text: str) -> List[float]:
"""
Asynchronously generates an embedding for a single piece of text.
This method is not implemented and raises a NotImplementedError.
Args:
text (str): The text to generate an embedding for.
Raises:
NotImplementedError: This method is not implemented.
"""
raise NotImplementedError("Asynchronous embedding generation is not supported.")
|
import importlib.util
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, model_validator
class SpacyEmbeddings(BaseModel, Embeddings):
"""Embeddings by spaCy models.
Attributes:
model_name (str): Name of a spaCy model.
nlp (Any): The spaCy model loaded into memory.
Methods:
embed_documents(texts: List[str]) -> List[List[float]]:
Generates embeddings for a list of documents.
embed_query(text: str) -> List[float]:
Generates an embedding for a single piece of text.
"""
model_name: str = "en_core_web_sm"
nlp: Optional[Any] = None
model_config = ConfigDict(extra="forbid", protected_namespaces=())
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""
Validates that the spaCy package and the model are installed.
Args:
values (Dict): The values provided to the class constructor.
Returns:
The validated values.
Raises:
ValueError: If the spaCy package or the
model are not installed.
"""
if values.get("model_name") is None:
values["model_name"] = "en_core_web_sm"
model_name = values.get("model_name")
# Check if the spaCy package is installed
if importlib.util.find_spec("spacy") is None:
raise ValueError(
"SpaCy package not found. Please install it with `pip install spacy`."
)
try:
# Try to load the spaCy model
import spacy
values["nlp"] = spacy.load(model_name) # type: ignore[arg-type]
except OSError:
# If the model is not found, raise a ValueError
raise ValueError(
f"SpaCy model '{model_name}' not found. "
f"Please install it with"
f" `python -m spacy download {model_name}`"
"or provide a valid spaCy model name."
)
return values # Return the validated values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generates embeddings for a list of documents.
Args:
texts (List[str]): The documents to generate embeddings for.
Returns:
A list of embeddings, one for each document.
"""
return [self.nlp(text).vector.tolist() for text in texts] # type: ignore[misc]
def embed_query(self, text: str) -> List[float]:
"""
Generates an embedding for a single piece of text.
Args:
text (str): The text to generate an embedding for.
Returns:
The embedding for the text.
"""
return self.nlp(text).vector.tolist() # type: ignore[misc]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Asynchronously generates embeddings for a list of documents.
This method is not implemented and raises a NotImplementedError.
Args:
texts (List[str]): The documents to generate embeddings for.
Raises:
NotImplementedError: This method is not implemented.
"""
raise NotImplementedError("Asynchronous embedding generation is not supported.")
async def aembed_query(self, text: str) -> List[float]:
"""
Asynchronously generates an embedding for a single piece of text.
This method is not implemented and raises a NotImplementedError.
Args:
text (str): The text to generate an embedding for.
Raises:
NotImplementedError: This method is not implemented.
"""
raise NotImplementedError("Asynchronous embedding generation is not supported.")
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
"""
try:
for process in processes[:-1]:
process.start(background=True, **kwargs)
# Run the last process in the foreground
processes[-1].start(background=False, **kwargs)
finally:
for process in processes:
process.stop()
def main(**kwargs):
"""
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
from backend.notifications import NotificationManager
from backend.server.rest_api import AgentServer
from backend.server.ws_api import WebsocketServer
run_processes(
DatabaseManager(),
ExecutionManager(),
ExecutionScheduler(),
NotificationManager(),
WebsocketServer(),
AgentServer(),
**kwargs,
)
if __name__ == "__main__":
main()
|
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from backend.util.process import AppProcess
def run_processes(*processes: "AppProcess", **kwargs):
"""
Execute all processes in the app. The last process is run in the foreground.
"""
try:
for process in processes[:-1]:
process.start(background=True, **kwargs)
# Run the last process in the foreground
processes[-1].start(background=False, **kwargs)
finally:
for process in processes:
process.stop()
def main(**kwargs):
"""
Run all the processes required for the AutoGPT-server (REST and WebSocket APIs).
"""
from backend.executor import DatabaseManager, ExecutionManager, ExecutionScheduler
from backend.server.rest_api import AgentServer
from backend.server.ws_api import WebsocketServer
run_processes(
DatabaseManager(),
ExecutionManager(),
ExecutionScheduler(),
WebsocketServer(),
AgentServer(),
**kwargs,
)
if __name__ == "__main__":
main()
|
from .database import DatabaseManager
from .manager import ExecutionManager
from .scheduler import Scheduler
__all__ = [
"DatabaseManager",
"ExecutionManager",
"Scheduler",
]
|
from .database import DatabaseManager
from .manager import ExecutionManager
from .scheduler import ExecutionScheduler
__all__ = [
"DatabaseManager",
"ExecutionManager",
"ExecutionScheduler",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
from .cocoeval_mp import COCOevalMP
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic', 'COCOevalMP']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_api import COCO, COCOeval, COCOPanoptic
__all__ = ['COCO', 'COCOeval', 'COCOPanoptic']
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import MagicMock, Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv2 = nn.Conv2d(
in_channels=2,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv3 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
return x1, x2
model = Model()
x = torch.rand(1, 1, 3, 3)
dummy_runner = MagicMock()
dummy_runner.optimizer.zero_grad = Mock(return_value=None)
dummy_runner.optimizer.step = Mock(return_value=None)
dummy_runner.model = model
dummy_runner.outputs = dict()
dummy_runner.outputs['num_samples'] = 0
class DummyLogger():
def __init__(self):
self.msg = ''
def log(self, msg=None, **kwargs):
self.msg += msg
dummy_runner.logger = DummyLogger()
optimizer_hook = OptimizerHook(
dict(max_norm=2), detect_anomalous_params=True)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
optimizer_hook.after_train_iter(dummy_runner, 0)
# assert the parameters of conv2 and conv3 are not in the
# computational graph which is with x1.sum() as root.
assert 'conv2.weight' in dummy_runner.logger.msg
assert 'conv2.bias' in dummy_runner.logger.msg
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_called()
optimizer_hook.detect_anomalous_parameters.assert_called()
dummy_runner.outputs['loss'] = model(x)[1].sum()
dummy_runner.logger.msg = ''
optimizer_hook.after_train_iter(dummy_runner, 0)
# assert the parameters of conv3 are not in the computational graph
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv2.weight' not in dummy_runner.logger.msg
assert 'conv2.bias' not in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
# grad_clip is None and detect_anomalous_parameters is False
optimizer_hook = OptimizerHook(detect_anomalous_params=False)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.after_train_iter(dummy_runner, 0)
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_not_called()
optimizer_hook.detect_anomalous_parameters.assert_not_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv2 = nn.Conv2d(
in_channels=2,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv3 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
return x1, x2
model = Model()
x = torch.rand(1, 1, 3, 3)
dummy_runner = Mock()
dummy_runner.optimizer.zero_grad = Mock(return_value=None)
dummy_runner.optimizer.step = Mock(return_value=None)
dummy_runner.model = model
dummy_runner.outputs = dict()
dummy_runner.outputs['num_samples'] = 0
class DummyLogger():
def __init__(self):
self.msg = ''
def log(self, msg=None, **kwargs):
self.msg += msg
dummy_runner.logger = DummyLogger()
optimizer_hook = OptimizerHook(
dict(max_norm=2), detect_anomalous_params=True)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
optimizer_hook.after_train_iter(dummy_runner, 0)
# assert the parameters of conv2 and conv3 are not in the
# computational graph which is with x1.sum() as root.
assert 'conv2.weight' in dummy_runner.logger.msg
assert 'conv2.bias' in dummy_runner.logger.msg
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_called()
optimizer_hook.detect_anomalous_parameters.assert_called()
dummy_runner.outputs['loss'] = model(x)[1].sum()
dummy_runner.logger.msg = ''
optimizer_hook.after_train_iter(dummy_runner, 0)
# assert the parameters of conv3 are not in the computational graph
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv2.weight' not in dummy_runner.logger.msg
assert 'conv2.bias' not in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
# grad_clip is None and detect_anomalous_parameters is False
optimizer_hook = OptimizerHook(detect_anomalous_params=False)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.after_train_iter(dummy_runner, 0)
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_not_called()
optimizer_hook.detect_anomalous_parameters.assert_not_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
Defaults to 56.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
finest_scale: int = 56,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
roi_layer=roi_layer,
out_channels=out_channels,
featmap_strides=featmap_strides,
init_cfg=init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor:
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None):
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
# convert fp32 to fp16 when amp is on
rois = rois.type_as(feats[0])
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
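# Hedged sketch restating the map_roi_levels rule on two concrete boxes,
# assuming finest_scale=56 and four feature levels (illustration only).
rois = torch.tensor([[0., 0., 0., 100., 100.], [0., 0., 0., 500., 500.]])
scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / 56 + 1e-6)).clamp(0, 3).long()
print(target_lvls)  # tensor([0, 3]): a 100x100 box -> level 0, 500x500 -> level 3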
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple
import torch
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptMultiConfig
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
Defaults to 56.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
finest_scale: int = 56,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
roi_layer=roi_layer,
out_channels=out_channels,
featmap_strides=featmap_strides,
init_cfg=init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor:
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None):
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
from mmengine.fileio import list_from_file
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
PALETTE = [(0, 255, 0)]
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
PALETTE = [(0, 255, 0)]
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|