input | output |
---|---|
import os
from typing import Any, Callable, List, Optional, Tuple
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base class for making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__len__`` methods.
Args:
root (string, optional): Root directory of dataset. Only used for `__repr__`.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: str = None, # type: ignore[assignment]
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, str):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
|
import os
from typing import Any, Callable, List, Optional, Tuple
import torch.utils.data as data
from ..utils import _log_api_usage_once
class VisionDataset(data.Dataset):
"""
Base class for making datasets which are compatible with torchvision.
It is necessary to override the ``__getitem__`` and ``__len__`` methods.
Args:
root (string, optional): Root directory of dataset. Only used for `__repr__`.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
.. note::
:attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
"""
_repr_indent = 4
def __init__(
self,
root: str = None, # type: ignore[assignment]
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
_log_api_usage_once(self)
if isinstance(root, str):
root = os.path.expanduser(root)
self.root = root
has_transforms = transforms is not None
has_separate_transform = transform is not None or target_transform is not None
if has_transforms and has_separate_transform:
raise ValueError("Only transforms or transform/target_transform can be passed as argument")
# for backwards-compatibility
self.transform = transform
self.target_transform = target_transform
if has_separate_transform:
transforms = StandardTransform(transform, target_transform)
self.transforms = transforms
def __getitem__(self, index: int) -> Any:
"""
Args:
index (int): Index
Returns:
(Any): Sample and meta data, optionally transformed by the respective transforms.
"""
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
def __repr__(self) -> str:
head = "Dataset " + self.__class__.__name__
body = [f"Number of datapoints: {self.__len__()}"]
if self.root is not None:
body.append(f"Root location: {self.root}")
body += self.extra_repr().splitlines()
if hasattr(self, "transforms") and self.transforms is not None:
body += [repr(self.transforms)]
lines = [head] + [" " * self._repr_indent + line for line in body]
return "\n".join(lines)
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def extra_repr(self) -> str:
return ""
class StandardTransform:
def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
self.transform = transform
self.target_transform = target_transform
def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
if self.transform is not None:
input = self.transform(input)
if self.target_transform is not None:
target = self.target_transform(target)
return input, target
def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
lines = transform.__repr__().splitlines()
return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
def __repr__(self) -> str:
body = [self.__class__.__name__]
if self.transform is not None:
body += self._format_transform_repr(self.transform, "Transform: ")
if self.target_transform is not None:
body += self._format_transform_repr(self.target_transform, "Target transform: ")
return "\n".join(body)
|
"""Chains and utils related to evaluating question answering functionality."""
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
)
from langchain.evaluation.qa.generate_chain import QAGenerateChain
__all__ = ["ContextQAEvalChain", "CotQAEvalChain", "QAEvalChain", "QAGenerateChain"]
|
"""Chains and utils related to evaluating question answering functionality."""
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
)
from langchain.evaluation.qa.generate_chain import QAGenerateChain
__all__ = ["QAEvalChain", "QAGenerateChain", "ContextQAEvalChain", "CotQAEvalChain"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
from mmdet.utils import get_device
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
device = get_device()
self.seed = sync_random_seed(seed, device)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmdet.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# In distributed sampling, different ranks should sample
# non-overlapped data in the dataset. Therefore, this function
# is used to make sure that each rank shuffles the data indices
# in the same order based on the same seed. Then different ranks
# could use different indices to select non-overlapped data from the
# same data list.
self.seed = sync_random_seed(seed)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
# When :attr:`shuffle=True`, this ensures all replicas
# use a different random ordering for each epoch.
# Otherwise, the next iteration of this sampler will
# yield the same ordering.
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
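# --- Illustrative sketch (not part of the original file): wiring this sampler
# into a DataLoader. It assumes torch.distributed is already initialized, since
# both the parent class and sync_random_seed need a process group.
from torch.utils.data import DataLoader, TensorDataset
def build_train_loader(batch_size=8):
    dataset = TensorDataset(torch.arange(100))
    sampler = DistributedSampler(dataset, shuffle=True, seed=0)
    loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler)
    return sampler, loader
def run_epochs(num_epochs=2):
    sampler, loader = build_train_loader()
    for epoch in range(num_epochs):
        # set_epoch changes `self.epoch + self.seed`, so every epoch gets a new
        # shuffle order that is still identical across ranks.
        sampler.set_epoch(epoch)
        for batch in loader:
            pass  # training step would go here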
|
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
__all__ = [
"DenoisingAutoEncoderDataset",
"NoDuplicatesDataLoader",
"ParallelSentencesDataset",
"SentencesDataset",
"SentenceLabelDataset",
]
|
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentencesDataset import SentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
__all__ = [
"DenoisingAutoEncoderDataset",
"NoDuplicatesDataLoader",
"ParallelSentencesDataset",
"SentencesDataset",
"SentenceLabelDataset",
]
|
from langchain_core.prompts.prompt import PromptTemplate
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:""" # noqa: E501
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:""" # noqa: E501
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
|
# flake8: noqa
from langchain_core.prompts.prompt import PromptTemplate
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
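# --- Illustrative sketch (not part of the original file): rendering both
# prompts with made-up values to show the expected placeholders.
standalone_question = CONDENSE_QUESTION_PROMPT.format(
    chat_history="Human: What is LangChain?\nAssistant: A framework for building LLM apps.",
    question="Who maintains it?",
)
answer_prompt = QA_PROMPT.format(
    context="LangChain is an open-source framework for building LLM applications.",
    question="What is LangChain?",
)
print(standalone_question)
print(answer_prompt)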
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, List, Dict
import numpy as np
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
class ImageTFEncoder(Executor):
"""
:class:`ImageTFEncoder` encodes ``Document`` content from an ndarray,
potentially B x (Height x Width x Channel), into an ndarray of `B x D`,
where `B` is the batch size and `D` is the dimension.
The :class:`ImageTFEncoder` wraps the models from
`tensorflow.keras.applications`.
<https://www.tensorflow.org/api_docs/python/tf/keras/applications>`_
:param model_name: the name of the model. Supported models include
``DenseNet121``, ``DenseNet169``, ``DenseNet201``,
``InceptionResNetV2``, ``InceptionV3``, ``MobileNet``,
``MobileNetV2``, ``NASNetLarge``, ``NASNetMobile``,
``ResNet101``, ``ResNet152``, ``ResNet50``, ``ResNet101V2``,
``ResNet152V2``, ``ResNet50V2``, ``VGG16``, ``VGG19``,
``Xception``, etc. A full list can be found at
https://www.tensorflow.org/api_docs/python/tf/keras/applications#functions
:param img_shape: The shape of the image to be encoded.
:param pool_strategy: the pooling strategy. Options are:
- `None`: Means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg`: Means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max`: Means that global max pooling will be applied.
:param default_batch_size: size of each batch
:param default_traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param on_gpu: set to True if using GPU
:param args: additional positional arguments.
:param kwargs: additional keyword arguments.
"""
def __init__(self,
model_name: str = 'MobileNetV2',
img_shape: int = 336,
pool_strategy: str = 'max',
default_batch_size: int = 32,
default_traversal_paths: List[str] = None,
on_gpu: bool = True,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if default_traversal_paths is None:
default_traversal_paths = ['r']
self.model_name = model_name
self.pool_strategy = pool_strategy
self.img_shape = img_shape
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.on_gpu = on_gpu
self.logger = JinaLogger(self.__class__.__name__)
import tensorflow as tf
cpus = tf.config.experimental.list_physical_devices(device_type='CPU')
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
if self.on_gpu and len(gpus) > 0:
cpus.append(gpus[0])
if self.on_gpu and len(gpus) == 0:
self.logger.warning('You tried to use a GPU but no GPU was found on'
' your system. Defaulting to CPU!')
tf.config.experimental.set_visible_devices(devices=cpus)
model = getattr(tf.keras.applications, self.model_name)(
input_shape=(self.img_shape, self.img_shape, 3),
include_top=False,
pooling=self.pool_strategy,
weights='imagenet')
model.trainable = False
self.model = model
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
Encode document content into an ndarray of `B x D`, where
`B` is the batch size and `D` is the dimension.
:param docs: DocumentArray containing blob as image data.
:param args: additional positional arguments.
:param kwargs: additional keyword arguments.
:return: Encoded result as a `BatchSize x D` numpy ``ndarray``,
`D` is the output dimension
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.default_traversal_paths),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob'
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
for document_batch in document_batches_generator:
blob_batch = np.stack([d.blob for d in document_batch])
embedding_batch = self.model(blob_batch)
for document, embedding in zip(document_batch, embedding_batch):
document.embedding = np.array(embedding)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, List, Dict
import numpy as np
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
class ImageTFEncoder(Executor):
"""
:class:`ImageTFEncoder` encodes ``Document`` content from an ndarray,
potentially B x (Height x Width x Channel), into an ndarray of `B x D`,
where `B` is the batch size and `D` is the dimension.
The :class:`ImageTFEncoder` wraps the models from
`tensorflow.keras.applications`. <https://keras.io/applications/>`_.
:param model_name: the name of the model. Supported models include
``DenseNet121``, ``DenseNet169``, ``DenseNet201``,
``InceptionResNetV2``, ``InceptionV3``, ``MobileNet``,
``MobileNetV2``, ``NASNetLarge``, ``NASNetMobile``,
``ResNet101``, ``ResNet152``, ``ResNet50``, ``ResNet101V2``,
``ResNet152V2``, ``ResNet50V2``, ``VGG16``, ``VGG19``,
``Xception``,
:param img_shape: The shape of the image to be encoded.
:param pool_strategy: the pooling strategy. Options are:
- `None`: Means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg`: Means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max`: Means that global max pooling will be applied.
:param default_batch_size: size of each batch
:param default_traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param on_gpu: set to True if using GPU
:param args: additional positional arguments.
:param kwargs: additional keyword arguments.
"""
def __init__(self,
model_name: str = 'MobileNetV2',
img_shape: int = 336,
pool_strategy: str = 'max',
default_batch_size: int = 32,
default_traversal_paths: List[str] = None,
on_gpu: bool = True,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if default_traversal_paths is None:
default_traversal_paths = ['r']
self.model_name = model_name
self.pool_strategy = pool_strategy
self.img_shape = img_shape
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.on_gpu = on_gpu
self.logger = JinaLogger(self.__class__.__name__)
import tensorflow as tf
cpus = tf.config.experimental.list_physical_devices(device_type='CPU')
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
if self.on_gpu and len(gpus) > 0:
cpus.append(gpus[0])
if self.on_gpu and len(gpus) == 0:
self.logger.warning('You tried to use a GPU but no GPU was found on'
' your system. Defaulting to CPU!')
tf.config.experimental.set_visible_devices(devices=cpus)
model = getattr(tf.keras.applications, self.model_name)(
input_shape=(self.img_shape, self.img_shape, 3),
include_top=False,
pooling=self.pool_strategy,
weights='imagenet')
model.trainable = False
self.model = model
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
Encode document content into an ndarray of `B x D`, where
`B` is the batch size and `D` is the dimension.
:param docs: DocumentArray containing blob as image data.
:param args: additional positional arguments.
:param kwargs: additional keyword arguments.
:return: Encoded result as a `BatchSize x D` numpy ``ndarray``,
`D` is the output dimension
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.default_traversal_paths),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob'
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
for document_batch in document_batches_generator:
blob_batch = np.stack([d.blob for d in document_batch])
embedding_batch = self.model(blob_batch)
for document, embedding in zip(document_batch, embedding_batch):
document.embedding = np.array(embedding)
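# --- Illustrative sketch (not part of the original file): encoding a few
# random images with the executor above. Blob shape must match `img_shape`
# (336 x 336 x 3 here); the embedding size shown is just an expectation for
# MobileNetV2 with max pooling and may differ for other models.
from jina import Document
def demo_encode():
    encoder = ImageTFEncoder(model_name='MobileNetV2', img_shape=336)
    docs = DocumentArray(
        [Document(blob=np.random.rand(336, 336, 3).astype('float32')) for _ in range(4)]
    )
    encoder.encode(docs, parameters={})
    print(docs[0].embedding.shape)  # e.g. (1280,)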
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image_tensor,
get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_boxes,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image_tensor,
get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_boxes,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
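# --- Illustrative sketch (not part of the original file): applying a few of
# the re-exported functional ops to a plain uint8 CHW tensor. Exact signatures
# follow the transforms-v2 prototype this module belongs to.
import torch
def demo_functional_ops():
    img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
    img = resize(img, size=[256, 256])
    img = horizontal_flip(img)
    img = adjust_brightness(img, brightness_factor=1.2)
    return img.shape, img.dtype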
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config, load_dataset_builder
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.en", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.fr", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.frr", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.it", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.simple", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wiki40b", "config_name": "en", "revision": "7b21a2e64b90323b2d3d1b81aa349bb4bc76d9bf"},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.nq.compressed",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.nq.no_index",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.multiset.no_index",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{"dataset": "natural_questions", "config_name": "default", "revision": "19ba7767b174ad046a84f46af056517a3910ee57"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True, with_revision=True):
columns = ["dataset"]
if with_config:
columns.append("config_name")
if with_revision:
columns.append("revision")
dataset_list = [{col: dataset[col] for col in columns} for dataset in DATASETS_ON_HF_GCP]
def get_testcase_name(dataset):
testcase_name = dataset["dataset"]
if with_config:
testcase_name += "/" + dataset["config_name"]
if with_revision:
testcase_name += "@" + dataset["revision"]
return testcase_name
dataset_list = [{"testcase_name": get_testcase_name(dataset), **dataset} for dataset in dataset_list]
return dataset_list
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True, with_revision=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
revision = None
def test_dataset_info_available(self, dataset, config_name, revision):
with TemporaryDirectory() as tmp_dir:
builder = load_dataset_builder(
dataset,
config_name,
revision=revision,
cache_dir=tmp_dir,
trust_remote_code=True,
)
dataset_info_url = "/".join(
[
HF_GCP_BASE_URL,
builder._relative_data_dir(with_hash=False).replace(os.sep, "/"),
config.DATASET_INFO_FILENAME,
]
)
dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
builder = load_dataset_builder("wikipedia", "20220301.frr", cache_dir=tmp_dir, trust_remote_code=True)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder._download_and_prepare = None
builder.download_and_prepare(try_from_hf_gcs=True)
ds = builder.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
builder = load_dataset_builder(
"wikipedia",
"20220301.frr",
revision="4d013bdd32c475c8536aae00a56efc774f061649",
cache_dir=tmp_path,
trust_remote_code=True,
)
ds = builder.as_streaming_dataset()
assert ds
assert isinstance(ds, IterableDatasetDict)
assert "train" in ds
assert isinstance(ds["train"], IterableDataset)
assert next(iter(ds["train"]))
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config, load_dataset_builder
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.en", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.fr", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.frr", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.it", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wikipedia", "config_name": "20220301.simple", "revision": "4d013bdd32c475c8536aae00a56efc774f061649"},
{"dataset": "wiki40b", "config_name": "en", "revision": "7b21a2e64b90323b2d3d1b81aa349bb4bc76d9bf"},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.nq.compressed",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.nq.no_index",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{
"dataset": "wiki_dpr",
"config_name": "psgs_w100.multiset.no_index",
"revision": "b24a417d802a583f8922946c1c75210290e93108",
},
{"dataset": "natural_questions", "config_name": "default", "revision": "19ba7767b174ad046a84f46af056517a3910ee57"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True, with_revision=True):
columns = ["dataset"]
if with_config:
columns.append("config_name")
if with_revision:
columns.append("revision")
dataset_list = [{col: dataset[col] for col in columns} for dataset in DATASETS_ON_HF_GCP]
def get_testcase_name(dataset):
testcase_name = dataset["dataset"]
if with_config:
testcase_name += "/" + dataset["config_name"]
if with_revision:
testcase_name += "@" + dataset["revision"]
return testcase_name
dataset_list = [{"testcase_name": get_testcase_name(dataset), **dataset} for dataset in dataset_list]
return dataset_list
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True, with_revision=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
revision = None
def test_dataset_info_available(self, dataset, config_name, revision):
with TemporaryDirectory() as tmp_dir:
builder = load_dataset_builder(
dataset,
config_name,
revision=revision,
cache_dir=tmp_dir,
)
dataset_info_url = "/".join(
[
HF_GCP_BASE_URL,
builder._relative_data_dir(with_hash=False).replace(os.sep, "/"),
config.DATASET_INFO_FILENAME,
]
)
dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
builder = load_dataset_builder("wikipedia", "20220301.frr", cache_dir=tmp_dir)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder._download_and_prepare = None
builder.download_and_prepare(try_from_hf_gcs=True)
ds = builder.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
builder = load_dataset_builder(
"wikipedia", "20220301.frr", revision="4d013bdd32c475c8536aae00a56efc774f061649", cache_dir=tmp_path
)
ds = builder.as_streaming_dataset()
assert ds
assert isinstance(ds, IterableDatasetDict)
assert "train" in ds
assert isinstance(ds["train"], IterableDataset)
assert next(iter(ds["train"]))
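# --- Illustrative sketch (not part of the original file): what the helper
# above produces when revisions are omitted; `testcase_name` feeds absl's
# named_parameters.
def show_parameterization():
    params = list_datasets_on_hf_gcp_parameters(with_config=True, with_revision=False)
    print(params[0])
    # -> {'testcase_name': 'wikipedia/20220301.de',
    #     'dataset': 'wikipedia', 'config_name': '20220301.de'}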
|
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './gfl_r50_fpn_1x_coco.py'
max_epochs = 24
# learning policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
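# --- Illustrative sketch (not part of the original file): loading a config
# like the one above with mmengine and checking the resolved 2x schedule.
# The file path is a hypothetical placeholder.
from mmengine.config import Config
def inspect_schedule(cfg_path='configs/gfl/gfl_r50_fpn_2x_coco.py'):  # hypothetical path
    cfg = Config.fromfile(cfg_path)
    print(cfg.train_cfg.max_epochs)              # 24
    print(cfg.param_scheduler[1]['milestones'])  # [16, 22]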
|
import datetime
from typing import List
import prisma.enums
import pydantic
from backend.server.model import Pagination
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
agent_image: str | None = None
description: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
active_version_id: str | None = None
has_approved_version: bool = False
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
store_listing_version_id: str | None = None
version: int | None = None # Actual version number from the database
reviewer_id: str | None = None
review_comments: str | None = None # External comments visible to creator
internal_comments: str | None = None # Private notes for admin use only
reviewed_at: datetime.datetime | None = None
changes_summary: str | None = None
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreListingWithVersions(pydantic.BaseModel):
"""A store listing with its version history"""
listing_id: str
slug: str
agent_id: str
agent_version: int
active_version_id: str | None = None
has_approved_version: bool = False
creator_email: str | None = None
latest_version: StoreSubmission | None = None
versions: list[StoreSubmission] = []
class StoreListingsWithVersionsResponse(pydantic.BaseModel):
"""Response model for listings with version history"""
listings: list[StoreListingWithVersions]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
changes_summary: str | None = None
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
class ReviewSubmissionRequest(pydantic.BaseModel):
store_listing_version_id: str
is_approved: bool
comments: str # External comments visible to creator
internal_comments: str | None = None # Private admin notes
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
agent_image: str | None = None
description: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
active_version_id: str | None = None
has_approved_version: bool = False
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
store_listing_version_id: str | None = None
version: int | None = None # Actual version number from the database
reviewer_id: str | None = None
review_comments: str | None = None # External comments visible to creator
internal_comments: str | None = None # Private notes for admin use only
reviewed_at: datetime.datetime | None = None
changes_summary: str | None = None
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreListingWithVersions(pydantic.BaseModel):
"""A store listing with its version history"""
listing_id: str
slug: str
agent_id: str
agent_version: int
active_version_id: str | None = None
has_approved_version: bool = False
creator_email: str | None = None
latest_version: StoreSubmission | None = None
versions: list[StoreSubmission] = []
class StoreListingsWithVersionsResponse(pydantic.BaseModel):
"""Response model for listings with version history"""
listings: list[StoreListingWithVersions]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
changes_summary: str | None = None
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
class ReviewSubmissionRequest(pydantic.BaseModel):
store_listing_version_id: str
is_approved: bool
comments: str # External comments visible to creator
internal_comments: str | None = None # Private admin notes
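# --- Illustrative sketch (not part of the original file): constructing a few
# of the models above with made-up values to show the expected field shapes.
def build_sample_payloads():
    pagination = Pagination(total_items=42, total_pages=2, current_page=1, page_size=25)
    submission = StoreSubmissionRequest(
        agent_id="agent-123",  # placeholder identifiers
        agent_version=1,
        slug="my-agent",
        name="My Agent",
        sub_heading="Does useful things",
        image_urls=["https://example.com/agent.png"],
        description="An illustrative listing.",
        categories=["productivity"],
    )
    agents_page = StoreAgentsResponse(agents=[], pagination=pagination)
    return submission, agents_page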
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.common.global_state import clear_session as clear_session
from keras.src.backend.common.keras_tensor import (
is_keras_tensor as is_keras_tensor,
)
from keras.src.backend.common.variables import (
standardize_dtype as standardize_dtype,
)
from keras.src.layers.preprocessing.feature_space import (
FeatureSpace as FeatureSpace,
)
from keras.src.ops.operation_utils import get_source_inputs as get_source_inputs
from keras.src.saving.object_registration import (
CustomObjectScope as CustomObjectScope,
)
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import (
get_custom_objects as get_custom_objects,
)
from keras.src.saving.object_registration import (
get_registered_name as get_registered_name,
)
from keras.src.saving.object_registration import (
get_registered_object as get_registered_object,
)
from keras.src.saving.object_registration import (
register_keras_serializable as register_keras_serializable,
)
from keras.src.saving.serialization_lib import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.saving.serialization_lib import (
serialize_keras_object as serialize_keras_object,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight as pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight as unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as PyDataset,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import (
audio_dataset_from_directory as audio_dataset_from_directory,
)
from keras.src.utils.config import Config as Config
from keras.src.utils.dataset_utils import split_dataset as split_dataset
from keras.src.utils.file_utils import get_file as get_file
from keras.src.utils.image_dataset_utils import (
image_dataset_from_directory as image_dataset_from_directory,
)
from keras.src.utils.image_utils import array_to_img as array_to_img
from keras.src.utils.image_utils import img_to_array as img_to_array
from keras.src.utils.image_utils import load_img as load_img
from keras.src.utils.image_utils import save_img as save_img
from keras.src.utils.io_utils import (
disable_interactive_logging as disable_interactive_logging,
)
from keras.src.utils.io_utils import (
enable_interactive_logging as enable_interactive_logging,
)
from keras.src.utils.io_utils import (
is_interactive_logging_enabled as is_interactive_logging_enabled,
)
from keras.src.utils.model_visualization import model_to_dot as model_to_dot
from keras.src.utils.model_visualization import plot_model as plot_model
from keras.src.utils.numerical_utils import normalize as normalize
from keras.src.utils.numerical_utils import to_categorical as to_categorical
from keras.src.utils.progbar import Progbar as Progbar
from keras.src.utils.rng_utils import set_random_seed as set_random_seed
from keras.src.utils.sequence_utils import pad_sequences as pad_sequences
from keras.src.utils.text_dataset_utils import (
text_dataset_from_directory as text_dataset_from_directory,
)
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array as timeseries_dataset_from_array,
)
from keras.utils import bounding_boxes as bounding_boxes
from keras.utils import legacy as legacy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import bounding_boxes
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
import asyncio
import random
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
class RequestStreamerWrapper:
def __init__(self, num_requests, prefetch, iterate_sync_in_thread):
self.num_requests = num_requests
self.requests_handled = []
self.results_handled = []
self.request_ids = [random_identity() for _ in range(num_requests)]
self.response_ids = []
args = Namespace()
args.prefetch = prefetch
self.streamer = RequestStreamer(
request_handler=self.request_handler_fn,
result_handler=self.result_handle_fn,
end_of_iter_handler=self.end_of_iter_fn,
prefetch=getattr(args, 'prefetch', 0),
iterate_sync_in_thread=iterate_sync_in_thread
)
def request_handler_fn(self, request):
self.requests_handled.append(request)
async def task():
rand_sleep = random.uniform(0.1, 0.6)
await asyncio.sleep(rand_sleep)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(self, result):
self.results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn(self):
# by the time iteration ends, every request has been handled, but some results may still be pending (hence the <= below)
assert len(self.requests_handled) == self.num_requests
assert len(self.results_handled) <= self.num_requests
def _yield_data_request(self, i):
req = DataRequest()
req.header.request_id = self.request_ids[i]
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
async def _get_async_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
await asyncio.sleep(0.1)
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
@pytest.mark.parametrize('results_in_order', [False, True])
@pytest.mark.parametrize('iterate_sync_in_thread', [False, True])
async def test_request_streamer(
prefetch, num_requests, async_iterator, results_in_order, iterate_sync_in_thread
):
test_streamer = RequestStreamerWrapper(num_requests, prefetch, iterate_sync_in_thread)
streamer = test_streamer.streamer
it = (
test_streamer._get_async_requests_iterator()
if async_iterator
else test_streamer._get_sync_requests_iterator()
)
response = streamer.stream(request_iterator=it, results_in_order=results_in_order)
num_responses = 0
async for r in response:
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
if results_in_order:
for req_id, resp_id in zip(
test_streamer.request_ids, test_streamer.response_ids
):
assert req_id == resp_id
@pytest.mark.asyncio
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('iterate_sync_in_thread', [False, True])
async def test_request_streamer_process_single_data(monkeypatch, num_requests, iterate_sync_in_thread):
test_streamer = RequestStreamerWrapper(num_requests, 0, iterate_sync_in_thread)
streamer = test_streamer.streamer
def end_of_iter_fn():
# bypass some assertions in RequestStreamerWrapper.end_of_iter_fn
pass
monkeypatch.setattr(streamer, '_end_of_iter_handler', end_of_iter_fn)
it = test_streamer._get_sync_requests_iterator()
num_responses = 0
for req in it:
r = await streamer.process_single_data(request=req)
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
|
import asyncio
import random
import pytest
from jina import Document, DocumentArray
from jina.helper import Namespace, random_identity
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest
class RequestStreamerWrapper:
def __init__(self, num_requests, prefetch):
self.num_requests = num_requests
self.requests_handled = []
self.results_handled = []
self.request_ids = [random_identity() for _ in range(num_requests)]
self.response_ids = []
args = Namespace()
args.prefetch = prefetch
self.streamer = RequestStreamer(
request_handler=self.request_handler_fn,
result_handler=self.result_handle_fn,
end_of_iter_handler=self.end_of_iter_fn,
prefetch=getattr(args, 'prefetch', 0),
)
def request_handler_fn(self, request):
self.requests_handled.append(request)
async def task():
rand_sleep = random.uniform(0.1, 0.6)
await asyncio.sleep(rand_sleep)
docs = request.docs
docs[0].tags['request_handled'] = True
request.data.docs = docs
return request
future = asyncio.ensure_future(task())
return future, None
def result_handle_fn(self, result):
self.results_handled.append(result)
assert isinstance(result, DataRequest)
docs = result.docs
docs[0].tags['result_handled'] = True
result.data.docs = docs
return result
def end_of_iter_fn(self):
        # with a sync generator, all requests are consumed by the time iteration
        # ends, but some results may still be pending
assert len(self.requests_handled) == self.num_requests
assert len(self.results_handled) <= self.num_requests
def _yield_data_request(self, i):
req = DataRequest()
req.header.request_id = self.request_ids[i]
da = DocumentArray()
da.append(Document())
req.data.docs = da
return req
def _get_sync_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
async def _get_async_requests_iterator(self):
for i in range(self.num_requests):
yield self._yield_data_request(i)
await asyncio.sleep(0.1)
@pytest.mark.asyncio
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('num_requests', [1, 5, 13])
@pytest.mark.parametrize('async_iterator', [False, True])
@pytest.mark.parametrize('results_in_order', [False, True])
async def test_request_streamer(
prefetch, num_requests, async_iterator, results_in_order
):
test_streamer = RequestStreamerWrapper(num_requests, prefetch)
streamer = test_streamer.streamer
it = (
test_streamer._get_async_requests_iterator()
if async_iterator
else test_streamer._get_sync_requests_iterator()
)
response = streamer.stream(request_iterator=it, results_in_order=results_in_order)
num_responses = 0
async for r in response:
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
if results_in_order:
for req_id, resp_id in zip(
test_streamer.request_ids, test_streamer.response_ids
):
assert req_id == resp_id
@pytest.mark.asyncio
@pytest.mark.parametrize('num_requests', [1, 5, 13])
async def test_request_streamer_process_single_data(monkeypatch, num_requests):
test_streamer = RequestStreamerWrapper(num_requests, 0)
streamer = test_streamer.streamer
def end_of_iter_fn():
# bypass some assertions in RequestStreamerWrapper.end_of_iter_fn
pass
monkeypatch.setattr(streamer, '_end_of_iter_handler', end_of_iter_fn)
it = test_streamer._get_sync_requests_iterator()
num_responses = 0
for req in it:
r = await streamer.process_single_data(request=req)
test_streamer.response_ids.append(r.header.request_id)
num_responses += 1
assert r.docs[0].tags['request_handled']
assert r.docs[0].tags['result_handled']
assert num_responses == num_requests
assert len(test_streamer.request_ids) == len(test_streamer.response_ids)
|
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
from lightning import ConformerRNNTModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from transforms import get_data_module
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
devices=args.gpus,
accelerator="gpu",
strategy=DDPStrategy(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
gradient_clip_val=10.0,
)
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
model = ConformerRNNTModule(sp_model)
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
|
import pathlib
from argparse import ArgumentParser
import sentencepiece as spm
from lightning import ConformerRNNTModule
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DDPPlugin
from transforms import get_data_module
def run_train(args):
seed_everything(1)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
train_checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/train_loss",
mode="min",
save_top_k=5,
save_weights_only=False,
verbose=True,
)
lr_monitor = LearningRateMonitor(logging_interval="step")
callbacks = [
checkpoint,
train_checkpoint,
lr_monitor,
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
num_nodes=args.nodes,
gpus=args.gpus,
accelerator="gpu",
strategy=DDPPlugin(find_unused_parameters=False),
callbacks=callbacks,
reload_dataloaders_every_n_epochs=1,
gradient_clip_val=10.0,
)
sp_model = spm.SentencePieceProcessor(model_file=str(args.sp_model_path))
model = ConformerRNNTModule(sp_model)
data_module = get_data_module(str(args.librispeech_path), str(args.global_stats_path), str(args.sp_model_path))
trainer.fit(model, data_module, ckpt_path=args.checkpoint_path)
def cli_main():
parser = ArgumentParser()
parser.add_argument(
"--checkpoint-path",
default=None,
type=pathlib.Path,
help="Path to checkpoint to use for evaluation.",
)
parser.add_argument(
"--exp-dir",
default=pathlib.Path("./exp"),
type=pathlib.Path,
help="Directory to save checkpoints and logs to. (Default: './exp')",
)
parser.add_argument(
"--global-stats-path",
default=pathlib.Path("global_stats.json"),
type=pathlib.Path,
help="Path to JSON file containing feature means and stddevs.",
)
parser.add_argument(
"--librispeech-path",
type=pathlib.Path,
help="Path to LibriSpeech datasets.",
required=True,
)
parser.add_argument(
"--sp-model-path",
type=pathlib.Path,
help="Path to SentencePiece model.",
required=True,
)
parser.add_argument(
"--nodes",
default=4,
type=int,
help="Number of nodes to use for training. (Default: 4)",
)
parser.add_argument(
"--gpus",
default=8,
type=int,
help="Number of GPUs per node to use for training. (Default: 8)",
)
parser.add_argument(
"--epochs",
default=120,
type=int,
help="Number of epochs to train for. (Default: 120)",
)
args = parser.parse_args()
run_train(args)
if __name__ == "__main__":
cli_main()
|
_base_ = './retinanet_r50_fpn_1x_coco_v1.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
# use caffe img_norm
mean=[102.9801, 115.9465, 122.7717],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')))
|
_base_ = './retinanet_r50_fpn_1x_coco_v1.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends, HTTPException, Query
from fastapi.responses import JSONResponse
from backend.data.user import (
get_user_by_email,
set_user_email_verification,
unsubscribe_user_by_token,
)
from backend.server.routers.postmark.models import (
PostmarkBounceEnum,
PostmarkBounceWebhook,
PostmarkClickWebhook,
PostmarkDeliveryWebhook,
PostmarkOpenWebhook,
PostmarkSpamComplaintWebhook,
PostmarkSubscriptionChangeWebhook,
PostmarkWebhook,
)
from backend.util.settings import Settings
settings = Settings()
postmark_validator = APIKeyValidator(
"X-Postmark-Webhook-Token",
settings.secrets.postmark_webhook_token,
)
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post("/unsubscribe")
async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
logger.info("Received unsubscribe request from One Click Unsubscribe")
try:
await unsubscribe_user_by_token(token)
except Exception as e:
logger.exception("Unsubscribe failed: %s", e)
raise HTTPException(
status_code=500,
detail={"message": str(e), "hint": "Verify Postmark token settings."},
)
return JSONResponse(status_code=200, content={"status": "ok"})
@router.post("/", dependencies=[Depends(postmark_validator.get_dependency())])
async def postmark_webhook_handler(
webhook: Annotated[
PostmarkWebhook,
Body(discriminator="RecordType"),
]
):
logger.info(f"Received webhook from Postmark: {webhook}")
match webhook:
case PostmarkDeliveryWebhook():
delivery_handler(webhook)
case PostmarkBounceWebhook():
await bounce_handler(webhook)
case PostmarkSpamComplaintWebhook():
spam_handler(webhook)
case PostmarkOpenWebhook():
open_handler(webhook)
case PostmarkClickWebhook():
click_handler(webhook)
case PostmarkSubscriptionChangeWebhook():
subscription_handler(webhook)
case _:
logger.warning(
"Unhandled Postmark webhook type %s. Update handler mappings.",
type(webhook),
)
return
async def bounce_handler(event: PostmarkBounceWebhook):
logger.info(f"Bounce handler {event=}")
if event.TypeCode in [
PostmarkBounceEnum.Transient,
PostmarkBounceEnum.SoftBounce,
PostmarkBounceEnum.DnsError,
]:
logger.info(
f"Softish bounce: {event.TypeCode} for {event.Email}, not setting email verification to false"
)
return
logger.info(f"{event.Email=}")
user = await get_user_by_email(event.Email)
if not user:
logger.warning(
"Received bounce for unknown email %s. Ensure user records are current.",
event.Email,
)
return
await set_user_email_verification(user.id, False)
logger.debug(f"Setting email verification to false for user: {user.id}")
def spam_handler(event: PostmarkSpamComplaintWebhook):
logger.info("Spam handler")
pass
def delivery_handler(event: PostmarkDeliveryWebhook):
logger.info("Delivery handler")
pass
def open_handler(event: PostmarkOpenWebhook):
logger.info("Open handler")
pass
def click_handler(event: PostmarkClickWebhook):
logger.info("Click handler")
pass
def subscription_handler(event: PostmarkSubscriptionChangeWebhook):
logger.info("Subscription handler")
pass
|
import logging
from typing import Annotated
from autogpt_libs.auth.middleware import APIKeyValidator
from fastapi import APIRouter, Body, Depends, HTTPException, Query
from fastapi.responses import JSONResponse
from backend.data.user import (
get_user_by_email,
set_user_email_verification,
unsubscribe_user_by_token,
)
from backend.server.routers.postmark.models import (
PostmarkBounceEnum,
PostmarkBounceWebhook,
PostmarkClickWebhook,
PostmarkDeliveryWebhook,
PostmarkOpenWebhook,
PostmarkSpamComplaintWebhook,
PostmarkSubscriptionChangeWebhook,
PostmarkWebhook,
)
from backend.util.settings import Settings
settings = Settings()
postmark_validator = APIKeyValidator(
"X-Postmark-Webhook-Token",
settings.secrets.postmark_webhook_token,
)
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post("/unsubscribe")
async def unsubscribe_via_one_click(token: Annotated[str, Query()]):
logger.info(f"Received unsubscribe request from One Click Unsubscribe: {token}")
try:
await unsubscribe_user_by_token(token)
except Exception as e:
logger.exception("Unsubscribe token %s failed: %s", token, e)
raise HTTPException(
status_code=500,
detail={"message": str(e), "hint": "Verify Postmark token settings."},
)
return JSONResponse(status_code=200, content={"status": "ok"})
@router.post("/", dependencies=[Depends(postmark_validator.get_dependency())])
async def postmark_webhook_handler(
webhook: Annotated[
PostmarkWebhook,
Body(discriminator="RecordType"),
]
):
logger.info(f"Received webhook from Postmark: {webhook}")
match webhook:
case PostmarkDeliveryWebhook():
delivery_handler(webhook)
case PostmarkBounceWebhook():
await bounce_handler(webhook)
case PostmarkSpamComplaintWebhook():
spam_handler(webhook)
case PostmarkOpenWebhook():
open_handler(webhook)
case PostmarkClickWebhook():
click_handler(webhook)
case PostmarkSubscriptionChangeWebhook():
subscription_handler(webhook)
case _:
logger.warning(
"Unhandled Postmark webhook type %s. Update handler mappings.",
type(webhook),
)
return
async def bounce_handler(event: PostmarkBounceWebhook):
logger.info(f"Bounce handler {event=}")
if event.TypeCode in [
PostmarkBounceEnum.Transient,
PostmarkBounceEnum.SoftBounce,
PostmarkBounceEnum.DnsError,
]:
logger.info(
f"Softish bounce: {event.TypeCode} for {event.Email}, not setting email verification to false"
)
return
logger.info(f"{event.Email=}")
user = await get_user_by_email(event.Email)
if not user:
logger.warning(
"Received bounce for unknown email %s. Ensure user records are current.",
event.Email,
)
return
await set_user_email_verification(user.id, False)
logger.debug(f"Setting email verification to false for user: {user.id}")
def spam_handler(event: PostmarkSpamComplaintWebhook):
logger.info("Spam handler")
pass
def delivery_handler(event: PostmarkDeliveryWebhook):
logger.info("Delivery handler")
pass
def open_handler(event: PostmarkOpenWebhook):
logger.info("Open handler")
pass
def click_handler(event: PostmarkClickWebhook):
logger.info("Click handler")
pass
def subscription_handler(event: PostmarkSubscriptionChangeWebhook):
logger.info("Subscription handler")
pass
|
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_simple_use_abs_import_shall_fail():
with pytest.raises(ModuleNotFoundError):
from .dummyhub_abs import DummyHubExecutorAbs
DummyHubExecutorAbs()
with pytest.raises(RuntimeFailToStart):
with Flow().add(uses='DummyHubExecutorAbs'):
pass
def test_simple_use_relative_import():
from .dummyhub import DummyHubExecutor
DummyHubExecutor()
with Flow().add(uses='DummyHubExecutor'):
pass
def test_use_from_local_dir_exe_level():
with BaseExecutor.load_config('dummyhub/config.yml'):
pass
def test_use_from_local_dir_deployment_level():
a = set_deployment_parser().parse_args(['--uses', 'dummyhub/config.yml'])
with Deployment(a):
pass
def test_use_from_local_dir_flow_level():
with Flow().add(uses='dummyhub/config.yml'):
pass
@pytest.fixture
def local_hub_executor(tmpdir):
from hubble.executor import HubExecutor, helper, hubapi
pkg_path = Path(__file__).parent / 'dummyhub'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
hubapi.install_local(
Path(tmpdir) / 'dummy_test.zip', HubExecutor(uuid='hello', tag='v0')
)
@pytest.mark.parametrize('uses', ['jinahub://hello', 'jinaai://jina-ai/hello'])
def test_use_from_local_hub_deployment_level(
mocker, monkeypatch, local_hub_executor, uses
):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
a = set_deployment_parser().parse_args(['--uses', uses])
with Deployment(a):
pass
@pytest.mark.parametrize('uses', ['jinahub://hello', 'jinaai://jina-ai/hello'])
def test_use_from_local_hub_flow_level(mocker, monkeypatch, local_hub_executor, uses):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
with Flow().add(uses=uses, install_requirements=True):
pass
|
import os
from pathlib import Path
import pytest
from jina import Flow
from jina.excepts import RuntimeFailToStart
from jina.orchestrate.deployments import Deployment
from jina.parsers import set_deployment_parser
from jina.serve.executors import BaseExecutor
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_simple_use_abs_import_shall_fail():
with pytest.raises(ModuleNotFoundError):
from .dummyhub_abs import DummyHubExecutorAbs
DummyHubExecutorAbs()
with pytest.raises(RuntimeFailToStart):
with Flow().add(uses='DummyHubExecutorAbs'):
pass
def test_simple_use_relative_import():
from .dummyhub import DummyHubExecutor
DummyHubExecutor()
with Flow().add(uses='DummyHubExecutor'):
pass
def test_use_from_local_dir_exe_level():
with BaseExecutor.load_config('dummyhub/config.yml'):
pass
def test_use_from_local_dir_deployment_level():
a = set_deployment_parser().parse_args(['--uses', 'dummyhub/config.yml'])
with Deployment(a):
pass
def test_use_from_local_dir_flow_level():
with Flow().add(uses='dummyhub/config.yml'):
pass
@pytest.fixture
def local_hub_executor(tmpdir):
from hubble.executor import HubExecutor, helper, hubapi
pkg_path = Path(__file__).parent / 'dummyhub'
stream_data = helper.archive_package(pkg_path)
with open(tmpdir / 'dummy_test.zip', 'wb') as temp_zip_file:
temp_zip_file.write(stream_data.getvalue())
hubapi.install_local(
Path(tmpdir) / 'dummy_test.zip', HubExecutor(uuid='hello', tag='v0')
)
def test_use_from_local_hub_deployment_level(mocker, monkeypatch, local_hub_executor):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
a = set_deployment_parser().parse_args(['--uses', 'jinahub://hello'])
with Deployment(a):
pass
def test_use_from_local_hub_flow_level(mocker, monkeypatch, local_hub_executor):
from hubble.executor.hubio import HubExecutor, HubIO
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name)
return (
HubExecutor(
uuid='hello',
name='alias_dummy',
tag='v0',
image_name='jinahub/pod.dummy_mwu_encoder',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
with Flow().add(uses='jinahub://hello', install_requirements=True):
pass
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "fer2013"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"))
@register_dataset(NAME)
class FER2013(Dataset):
"""FER 2013 Dataset
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_CHECKSUMS = {
"train": "a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10",
"test": "dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3",
}
def _resources(self) -> List[OnlineResource]:
archive = KaggleDownloadResource(
"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge",
file_name=f"{self._split}.csv.zip",
sha256=self._CHECKSUMS[self._split],
)
return [archive]
def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
label_id = data.get("emotion")
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self._categories) if label_id is not None else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 28_709 if self._split == "train" else 3_589
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Image, Label
from .._api import register_dataset, register_info
NAME = "fer2013"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"))
@register_dataset(NAME)
class FER2013(Dataset):
"""FER 2013 Dataset
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_CHECKSUMS = {
"train": "a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10",
"test": "dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3",
}
def _resources(self) -> List[OnlineResource]:
archive = KaggleDownloadResource(
"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge",
file_name=f"{self._split}.csv.zip",
sha256=self._CHECKSUMS[self._split],
)
return [archive]
def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
label_id = data.get("emotion")
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self._categories) if label_id is not None else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 28_709 if self._split == "train" else 3_589
|
import math
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Flatten")
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
Note: If inputs are shaped `(batch,)` without a feature axis, then
flattening adds an extra channel dimension and output shape is `(batch, 1)`.
Args:
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists). Defaults to
`"channels_last"`.
Example:
>>> x = keras.Input(shape=(10, 64))
>>> y = keras.layers.Flatten()(x)
>>> y.shape
(None, 640)
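    For the `(batch,)` case noted above (illustrative, derived from
    `compute_output_shape`):
    >>> x = keras.Input(shape=())
    >>> y = keras.layers.Flatten()(x)
    >>> y.shape
    (None, 1)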
"""
def __init__(self, data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(min_ndim=1)
self._channels_first = self.data_format == "channels_first"
def call(self, inputs):
input_shape = ops.shape(inputs)
rank = len(input_shape)
if self._channels_first and rank > 1:
# Switch to channels-last format.
inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1))
non_batch_dims = input_shape[1:]
if len(non_batch_dims) == 0:
flattened_dim = 1
elif any(not isinstance(d, int) for d in non_batch_dims):
flattened_dim = -1
else:
flattened_dim = math.prod(non_batch_dims)
return ops.reshape(inputs, (input_shape[0], flattened_dim))
def compute_output_shape(self, input_shape):
non_batch_dims = input_shape[1:]
if len(non_batch_dims) == 0:
flattened_dim = 1
elif any(d is None for d in non_batch_dims):
# NB: we cannot use the shorter `None in non_batch_dims` here b/c
# torchdynamo errors when calling `__contains__` op with
# a constant (in this case `None`) operand since it assumes
# that the elements in the collection are also `ConstantVariable`s
# but tensor shapes can be `SymNodeVariable`s (e.g. `SymInt`)
flattened_dim = None
else:
flattened_dim = math.prod(non_batch_dims)
return (input_shape[0], flattened_dim)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def get_config(self):
config = {"data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
|
import math
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Flatten")
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
Note: If inputs are shaped `(batch,)` without a feature axis, then
flattening adds an extra channel dimension and output shape is `(batch, 1)`.
Args:
data_format: A string, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, ..., channels)` while `"channels_first"` corresponds to
inputs with shape `(batch, channels, ...)`.
When unspecified, uses `image_data_format` value found in your Keras
config file at `~/.keras/keras.json` (if exists). Defaults to
`"channels_last"`.
Example:
>>> x = keras.Input(shape=(10, 64))
>>> y = keras.layers.Flatten()(x)
>>> y.shape
(None, 640)
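    For the `(batch,)` case noted above (illustrative, derived from
    `compute_output_shape`):
    >>> x = keras.Input(shape=())
    >>> y = keras.layers.Flatten()(x)
    >>> y.shape
    (None, 1)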
"""
def __init__(self, data_format=None, **kwargs):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(min_ndim=1)
self._channels_first = self.data_format == "channels_first"
def call(self, inputs):
input_shape = inputs.shape
rank = len(input_shape)
if self._channels_first and rank > 1:
# Switch to channels-last format.
inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1))
output_shape = tuple(
dim if dim is not None else -1
for dim in self.compute_output_shape(input_shape)
)
return ops.reshape(inputs, output_shape)
def compute_output_shape(self, input_shape):
non_batch_dims = input_shape[1:]
if len(non_batch_dims) == 0:
flattened_dim = 1
elif any(d is None for d in non_batch_dims):
# NB: we cannot use the shorter `None in non_batch_dims` here b/c
# torchdynamo errors when calling `__contains__` op with
# a constant (in this case `None`) operand since it assumes
# that the elements in the collection are also `ConstantVariable`s
# but tensor shapes can be `SymNodeVariable`s (e.g. `SymInt`)
flattened_dim = None
else:
flattened_dim = math.prod(non_batch_dims)
return (input_shape[0], flattened_dim)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def get_config(self):
config = {"data_format": self.data_format}
base_config = super().get_config()
return {**base_config, **config}
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPaddleEncoder(Executor):
"""
    Encode an array of strings of size `B` into an ndarray of size `B x D`,
    i.e. each document receives a `D`-dimensional embedding.
Internally, :class:`TextPaddlehubEncoder` wraps the Ernie module from paddlehub.
https://github.com/PaddlePaddle/PaddleHub
For models' details refer to
https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
"""
def __init__(
self,
model_name: Optional[str] = 'ernie_tiny',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Supported models include
``ernie``, ``ernie_tiny``, ``ernie_v2_eng_base``, ``ernie_v2_eng_large``,
``bert_chinese_L-12_H-768_A-12``, ``bert_multi_cased_L-12_H-768_A-12``,
``bert_multi_uncased_L-12_H-768_A-12``, ``bert_uncased_L-12_H-768_A-12``,
``bert_uncased_L-24_H-1024_A-16``, ``chinese-bert-wwm``,
``chinese-bert-wwm-ext``, ``chinese-electra-base``,
``chinese-electra-small``, ``chinese-roberta-wwm-ext``,
``chinese-roberta-wwm-ext-large``, ``rbt3``, ``rbtl3``
        :param traversal_paths: fallback traversal path in case no traversal path is sent in the request
        :param batch_size: fallback batch size in case no batch size is sent in the request
:param device: Device to be used. Use 'gpu' for GPU or use 'cpu' for CPU.
"""
super().__init__(*args, **kwargs)
self.device = device
self.model = hub.Module(name=model_name)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""Encode doc content into vector representation.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
needs_attr='text',
)
for batch_of_docs in document_batches_generator:
pooled_features = []
contents = [[doc.content] for doc in batch_of_docs]
results = self.model.get_embedding(
contents, use_gpu=self.device == 'gpu'
)
for pooled_feature, _ in results:
pooled_features.append(pooled_feature)
for doc, feature in zip(batch_of_docs, pooled_features):
doc.embedding = np.asarray(feature)
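
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# local Jina installation and that the default `ernie_tiny` PaddleHub model
# can be downloaded; the endpoint name '/encode' is arbitrary since the
# `encode` method is bound to all requests by the bare `@requests` decorator.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from jina import Document, Flow

    example_docs = DocumentArray([Document(text='hello world')])
    with Flow().add(uses=TextPaddleEncoder) as f:
        # Embeddings are attached to the documents by the Executor running
        # inside the Flow.
        f.post(on='/encode', inputs=example_docs)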
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPaddleEncoder(Executor):
"""
    Encode an array of strings of size `B` into an ndarray of size `B x D`,
    i.e. each document receives a `D`-dimensional embedding.
Internally, :class:`TextPaddlehubEncoder` wraps the Ernie module from paddlehub.
https://github.com/PaddlePaddle/PaddleHub
For models' details refer to
https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
"""
def __init__(
self,
model_name: Optional[str] = 'ernie_tiny',
default_traversal_paths: Iterable[str] = ('r',),
default_batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Supported models include
``ernie``, ``ernie_tiny``, ``ernie_v2_eng_base``, ``ernie_v2_eng_large``,
``bert_chinese_L-12_H-768_A-12``, ``bert_multi_cased_L-12_H-768_A-12``,
``bert_multi_uncased_L-12_H-768_A-12``, ``bert_uncased_L-12_H-768_A-12``,
``bert_uncased_L-24_H-1024_A-16``, ``chinese-bert-wwm``,
``chinese-bert-wwm-ext``, ``chinese-electra-base``,
``chinese-electra-small``, ``chinese-roberta-wwm-ext``,
``chinese-roberta-wwm-ext-large``, ``rbt3``, ``rbtl3``
        :param default_batch_size: fallback batch size in case no batch size is sent in the request
        :param default_traversal_paths: fallback traversal path in case no traversal path is sent in the request
:param device: Device to be used. Use 'gpu' for GPU or use 'cpu' for CPU.
"""
super().__init__(*args, **kwargs)
self.device = device
self.model = hub.Module(name=model_name)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""Encode doc content into vector representation.
:param docs: `DocumentArray` passed from the previous ``Executor``.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for batch_of_docs in document_batches_generator:
pooled_features = []
contents = [[doc.content] for doc in batch_of_docs]
results = self.model.get_embedding(
contents, use_gpu=self.device == 'gpu'
)
for pooled_feature, _ in results:
pooled_features.append(pooled_feature)
for doc, feature in zip(batch_of_docs, pooled_features):
doc.embedding = np.asarray(feature)
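
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# local Jina installation and that the default `ernie_tiny` PaddleHub model
# can be downloaded; this snapshot exposes `default_traversal_paths` and
# `default_batch_size` as the fallback parameters.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from jina import Document, Flow

    example_docs = DocumentArray([Document(text='hello world')])
    with Flow().add(uses=TextPaddleEncoder) as f:
        # Embeddings are attached to the documents by the Executor running
        # inside the Flow.
        f.post(on='/encode', inputs=example_docs)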
|
_base_ = './faster-rcnn_regnetx-3.2GF_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. in __iter__, iterate, either for a fixed number of steps
or until there is no data
"""
import contextlib
import warnings
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
self._current_iterator = None
self._epoch_iterator = None
self._steps_seen = 0
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def _interrupted_warning(self):
warnings.warn(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
def reset(self):
self._current_iterator = None
self._num_batches = self.data_adapter.num_batches
self._steps_seen = 0
self._epoch_iterator = None
def _enumerate_iterator(self):
self.data_adapter.on_epoch_begin()
steps_per_epoch = self.steps_per_epoch or self._num_batches or -1
if steps_per_epoch > 0:
if self._current_iterator is None or self.steps_per_epoch is None:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
for step in range(0, steps_per_epoch, self.steps_per_execution):
if self._num_batches and self._steps_seen >= self._num_batches:
if self.steps_per_epoch:
self._interrupted_warning()
break
self._steps_seen += self.steps_per_execution
yield step, self._current_iterator
if self._num_batches and self._steps_seen >= self._num_batches:
self._current_iterator = iter(self._get_iterator())
self._steps_seen = 0
else:
iterator = iter(self._get_iterator())
step = -self.steps_per_execution
while True:
step += self.steps_per_execution
self._steps_seen = step + self.steps_per_execution
yield step, iterator
self.data_adapter.on_epoch_end()
def __iter__(self):
self._epoch_iterator = self._enumerate_iterator()
return self
def __next__(self):
buffer = []
step, iterator = next(self._epoch_iterator)
with self.catch_stop_iteration():
for _ in range(self.steps_per_execution):
data = next(iterator)
buffer.append(data)
return step, buffer
if buffer:
return step, buffer
raise StopIteration
def enumerate_epoch(self):
for step, data in self:
yield step, data
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
except StopIteration:
if self._num_batches is None:
self._num_batches = self._steps_seen
self._interrupted_warning()
self._current_iterator = None
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
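
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Assumes plain
# numpy arrays, which `data_adapters.get_data_adapter` resolves to an array
# adapter, and a working Keras backend.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    x = np.random.rand(64, 3).astype("float32")
    y = np.random.rand(64, 1).astype("float32")
    epoch_iterator = EpochIterator(x=x, y=y, batch_size=16, steps_per_execution=2)
    for epoch in range(2):
        for step, batch in epoch_iterator:
            # `batch` holds up to `steps_per_execution` sub-batches pulled
            # from the numpy iterator in a single "step".
            print(f"epoch={epoch} step={step} sub_batches={len(batch)}")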
|
"""
Separation of concerns:
DataAdapter:
- x, y
- sample_weight
- class_weight
- shuffle
- batch_size
- steps, as it relates to batch_size for array data
EpochIterator:
- whether to yield numpy or tf data
- steps
- most argument validation
Trainer:
- steps_per_execution
- validation_split
- validation_data
- callbacks
- validation_freq
- epochs
- initial_epoch
- any backend-specific concern such as distribution
PyDataset:
- num_workers
- use_multiprocessing
- max_queue_size
EpochIterator steps:
1. Look at data type and select correct DataHandler
2. Instantiate DataHandler with correct arguments
3. Raise or warn on unused arguments
4. in `enumerate_epoch`, iterate, either for a fixed number of steps
or until there is no data
"""
import warnings
from keras.src.trainers import data_adapters
class EpochIterator:
def __init__(
self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
shuffle=False,
class_weight=None,
steps_per_execution=1,
):
self.steps_per_epoch = steps_per_epoch
self.steps_per_execution = steps_per_execution
if steps_per_epoch:
self._current_iterator = None
self._insufficient_data = False
self.data_adapter = data_adapters.get_data_adapter(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
)
self._num_batches = self.data_adapter.num_batches
def _get_iterator(self):
return self.data_adapter.get_numpy_iterator()
def enumerate_epoch(self):
buffer = []
self.data_adapter.on_epoch_begin()
if self.steps_per_epoch:
if self._current_iterator is None:
self._current_iterator = iter(self._get_iterator())
self._insufficient_data = False
for step in range(self.steps_per_epoch):
if self._insufficient_data:
break
try:
data = next(self._current_iterator)
buffer.append(data)
if len(buffer) == self.steps_per_execution:
yield step - len(buffer) + 1, buffer
buffer = []
except (StopIteration,):
warnings.warn(
"Your input ran out of data; interrupting epoch. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
self._current_iterator = None
self._insufficient_data = True
if buffer:
yield step - len(buffer) + 1, buffer
else:
for step, data in enumerate(self._get_iterator()):
buffer.append(data)
if len(buffer) == self.steps_per_execution:
yield step - len(buffer) + 1, buffer
buffer = []
if buffer:
yield step - len(buffer) + 1, buffer
if not self._num_batches:
# Infer the number of batches returned by the data_adapter.
# Assumed static.
self._num_batches = step + 1
self.data_adapter.on_epoch_end()
@property
def num_batches(self):
if self.steps_per_epoch:
return self.steps_per_epoch
# Either copied from the data_adapter, or
# inferred at the end of an iteration.
return self._num_batches
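
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Assumes plain
# numpy arrays and a working Keras backend; this snapshot of the class is
# driven through `enumerate_epoch()` rather than `__iter__`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    x = np.random.rand(64, 3).astype("float32")
    y = np.random.rand(64, 1).astype("float32")
    epoch_iterator = EpochIterator(x=x, y=y, batch_size=16, steps_per_execution=2)
    for step, batch in epoch_iterator.enumerate_epoch():
        # Each yielded `batch` groups up to `steps_per_execution` sub-batches.
        print(f"step={step} sub_batches={len(batch)}")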
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import ADE20KPanopticDataset
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler'
]
|
from __future__ import annotations
from .CrossEncoder import CrossEncoder
from .model_card import CrossEncoderModelCardData
from .trainer import CrossEncoderTrainer
from .training_args import CrossEncoderTrainingArguments
__all__ = [
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
]
|
from __future__ import annotations
from .CrossEncoder import CrossEncoder
__all__ = ["CrossEncoder"]
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray <=0.21.
It can be useful to start migrating a codebase from v1 to v2.
Nevertheless, the API is not totally compatible with DocArray <=0.21 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
of the data is similar.
```python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
    doc.chunks = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
    doc.matches = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
```
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocList[LegacyDocument]]
matches: Optional[DocList[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocList
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
It can be useful to start migrating a codebase from v1 to v2.
Nevertheless, the API is not totally compatible with DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
of the data is similar.
```python
from docarray import DocList
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
    doc.chunks = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
    doc.matches = DocList[LegacyDocument]([LegacyDocument() for _ in range(10)])
```
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocList[LegacyDocument]]
matches: Optional[DocList[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
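# Illustrative examples (not part of the original file):
#   default_dependency_rule(3, 'sum($1, ${3})') -> True   # args reference task 3
#   default_dependency_rule(2, 'sum($1, ${3})') -> False  # task 2 is never referenced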
def parse_llm_compiler_action_args(args: str) -> Union[List, Tuple]:
"""Parse arguments from a string."""
# This will convert the string into a python object
# e.g. '"Ronaldo number of kids"' -> ("Ronaldo number of kids", )
# '"I can answer the question now.", [3]' -> ("I can answer the question now.", [3])
if args == "":
return ()
try:
eval_args: Union[List, Tuple, str] = ast.literal_eval(args)
except Exception:
eval_args = args
if not isinstance(eval_args, list) and not isinstance(eval_args, tuple):
new_args: Union[List, Tuple] = (eval_args,)
else:
new_args = eval_args
return new_args
def _find_tool(tool_name: str, tools: Sequence[BaseTool]) -> BaseTool:
"""
Find a tool by name.
Args:
tool_name: Name of the tool to find.
Returns:
Tool or StructuredTool.
"""
for tool in tools:
if tool.metadata.name == tool_name:
return tool
raise ValueError(f"Tool {tool_name} not found.")
def _get_dependencies_from_graph(idx: int, tool_name: str, args: str) -> List[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
# depends on the previous step
dependencies = list(range(1, idx))
else:
# define dependencies based on the dependency rule in tool_definitions.py
dependencies = [i for i in range(1, idx) if default_dependency_rule(i, args)]
return dependencies
def instantiate_new_step(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: str,
thought: str,
) -> LLMCompilerTask:
"""Instantiate a new step."""
dependencies = _get_dependencies_from_graph(idx, tool_name, args)
args_list = parse_llm_compiler_action_args(args)
if tool_name == "join":
# tool: Optional[BaseTool] = None
# assume that the only tool that returns None is join
tool: BaseTool = FunctionTool.from_defaults(fn=lambda x: None)
else:
tool = _find_tool(tool_name, tools)
return LLMCompilerTask(
idx=idx,
name=tool_name,
tool=adapt_to_async_tool(tool),
args=args_list,
dependencies=dependencies,
# TODO: look into adding a stringify rule
# stringify_rule=stringify_rule,
thought=thought,
is_join=tool_name == "join",
)
def get_graph_dict(
parse_results: List[LLMCompilerParseResult],
tools: Sequence[BaseTool],
) -> Dict[int, Any]:
"""Get graph dict."""
graph_dict = {}
for parse_result in parse_results:
# idx = 1, function = "search", args = "Ronaldo number of kids"
# thought will be the preceding thought, if any, otherwise an empty string
# thought, idx, tool_name, args, _ = match
idx = int(parse_result.idx)
task = instantiate_new_step(
tools=tools,
idx=idx,
tool_name=parse_result.tool_name,
args=parse_result.args,
thought=parse_result.thought,
)
graph_dict[idx] = task
if task.is_join:
break
return graph_dict
def generate_context_for_replanner(
tasks: Dict[int, LLMCompilerTask], joiner_thought: str
) -> str:
"""
Generate context for replanning.
Formatted like this.
```
1. action 1
Observation: xxx
2. action 2
Observation: yyy
...
    Thought: joiner_thought
```
"""
previous_plan_and_observations = "\n".join(
[
task.get_thought_action_observation(
include_action=True, include_action_idx=True
)
for task in tasks.values()
if not task.is_join
]
)
joiner_thought = f"Thought: {joiner_thought}"
# use f-string instead
return f"{previous_plan_and_observations}\n\n{joiner_thought}"
def format_contexts(contexts: Sequence[str]) -> str:
"""
Format contexts.
Taken from https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
Contexts is a list of context.
Each context is formatted as the description of generate_context_for_replanner
"""
formatted_contexts = ""
for context in contexts:
formatted_contexts += f"Previous Plan:\n\n{context}\n\n"
formatted_contexts += "Current Plan:\n\n"
return formatted_contexts
|
"""Utils for LLM Compiler."""
import ast
import re
from typing import Any, Dict, List, Sequence, Tuple, Union
from llama_index.core.tools.function_tool import FunctionTool
from llama_index.core.tools.types import BaseTool, adapt_to_async_tool
from .schema import (
LLMCompilerParseResult,
LLMCompilerTask,
)
# $1 or ${1} -> 1
ID_PATTERN = r"\$\{?(\d+)\}?"
def default_dependency_rule(idx: int, args: str) -> bool:
"""Default dependency rule."""
matches = re.findall(ID_PATTERN, args)
numbers = [int(match) for match in matches]
return idx in numbers
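# Illustrative examples (not part of the original file):
#   default_dependency_rule(3, 'sum($1, ${3})') -> True   # args reference task 3
#   default_dependency_rule(2, 'sum($1, ${3})') -> False  # task 2 is never referenced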
def parse_llm_compiler_action_args(args: str) -> Union[List, Tuple]:
"""Parse arguments from a string."""
# This will convert the string into a python object
# e.g. '"Ronaldo number of kids"' -> ("Ronaldo number of kids", )
# '"I can answer the question now.", [3]' -> ("I can answer the question now.", [3])
if args == "":
return ()
try:
eval_args: Union[List, Tuple, str] = ast.literal_eval(args)
except Exception:
eval_args = args
if not isinstance(eval_args, list) and not isinstance(eval_args, tuple):
new_args: Union[List, Tuple] = (eval_args,)
else:
new_args = eval_args
return new_args
def _find_tool(tool_name: str, tools: Sequence[BaseTool]) -> BaseTool:
"""
Find a tool by name.
Args:
tool_name: Name of the tool to find.
Returns:
Tool or StructuredTool.
"""
for tool in tools:
if tool.metadata.name == tool_name:
return tool
raise ValueError(f"Tool {tool_name} not found.")
def _get_dependencies_from_graph(idx: int, tool_name: str, args: str) -> List[int]:
"""Get dependencies from a graph."""
if tool_name == "join":
# depends on the previous step
dependencies = list(range(1, idx))
else:
# define dependencies based on the dependency rule in tool_definitions.py
dependencies = [i for i in range(1, idx) if default_dependency_rule(i, args)]
return dependencies
def instantiate_new_step(
tools: Sequence[BaseTool],
idx: int,
tool_name: str,
args: str,
thought: str,
) -> LLMCompilerTask:
"""Instantiate a new step."""
dependencies = _get_dependencies_from_graph(idx, tool_name, args)
args_list = parse_llm_compiler_action_args(args)
if tool_name == "join":
# tool: Optional[BaseTool] = None
# assume that the only tool that returns None is join
tool: BaseTool = FunctionTool.from_defaults(fn=lambda x: None)
else:
tool = _find_tool(tool_name, tools)
return LLMCompilerTask(
idx=idx,
name=tool_name,
tool=adapt_to_async_tool(tool),
args=args_list,
dependencies=dependencies,
# TODO: look into adding a stringify rule
# stringify_rule=stringify_rule,
thought=thought,
is_join=tool_name == "join",
)
def get_graph_dict(
parse_results: List[LLMCompilerParseResult],
tools: Sequence[BaseTool],
) -> Dict[int, Any]:
"""Get graph dict."""
graph_dict = {}
for parse_result in parse_results:
# idx = 1, function = "search", args = "Ronaldo number of kids"
# thought will be the preceding thought, if any, otherwise an empty string
# thought, idx, tool_name, args, _ = match
idx = int(parse_result.idx)
task = instantiate_new_step(
tools=tools,
idx=idx,
tool_name=parse_result.tool_name,
args=parse_result.args,
thought=parse_result.thought,
)
graph_dict[idx] = task
if task.is_join:
break
return graph_dict
def generate_context_for_replanner(
tasks: Dict[int, LLMCompilerTask], joiner_thought: str
) -> str:
"""
Generate context for replanning.
Formatted like this.
```
1. action 1
Observation: xxx
2. action 2
Observation: yyy
...
    Thought: joiner_thought
```
"""
previous_plan_and_observations = "\n".join(
[
task.get_thought_action_observation(
include_action=True, include_action_idx=True
)
for task in tasks.values()
if not task.is_join
]
)
joiner_thought = f"Thought: {joiner_thought}"
# use f-string instead
return f"{previous_plan_and_observations}\n\n{joiner_thought}"
def format_contexts(contexts: Sequence[str]) -> str:
"""
Format contexts.
Taken from https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/llm_compiler.py
    Contexts is a list of contexts.
    Each context is formatted as described in generate_context_for_replanner.
"""
formatted_contexts = ""
for context in contexts:
formatted_contexts += f"Previous Plan:\n\n{context}\n\n"
formatted_contexts += "Current Plan:\n\n"
return formatted_contexts
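# Illustrative note (not part of the original module): each replanning context is wrapped
# in a "Previous Plan" block and a trailing "Current Plan" header is appended, so the
# planner LLM continues from there. A minimal sketch:
#
#   >>> format_contexts(["plan A", "plan B"])
#   'Previous Plan:\n\nplan A\n\nPrevious Plan:\n\nplan B\n\nCurrent Plan:\n\n'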
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes = mmdoc.point_cloud.url.load_bytes()
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes = mmdoc.point_cloud.url.load_bytes()
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
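# Illustrative note (not part of the original module): ``validate`` lets a plain URL
# string or a raw tensor be coerced into a PointCloud3D when the document is used as a
# field type. A minimal sketch, assuming docarray and numpy are available (the exact
# coercion behaviour depends on the docarray version):
#
#   >>> doc = PointCloud3D.validate('https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
#   >>> doc.url is not None
#   True
#   >>> doc = PointCloud3D.validate(np.zeros((100, 3)))
#   >>> doc.tensor.shape
#   (100, 3)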
|
import importlib
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, TypeVar
if TYPE_CHECKING:
from backend.data.block import Block
T = TypeVar("T")
_AVAILABLE_BLOCKS: dict[str, type["Block"]] = {}
def load_all_blocks() -> dict[str, type["Block"]]:
from backend.data.block import Block
if _AVAILABLE_BLOCKS:
return _AVAILABLE_BLOCKS
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
for block_cls in all_subclasses(Block):
class_name = block_cls.__name__
if class_name.endswith("Base"):
continue
if not class_name.endswith("Block"):
raise ValueError(
f"Block class {class_name} does not end with 'Block'. "
"If you are creating an abstract class, "
"please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(
f"Block ID {block.name} error: {block.id} is not a valid UUID"
)
if block.id in _AVAILABLE_BLOCKS:
raise ValueError(
f"Block ID {block.name} error: {block.id} is already in use"
)
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
# Ensure all fields in input_schema and output_schema are annotated SchemaFields
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(
f"{block.name} has a boolean field with no default value"
)
_AVAILABLE_BLOCKS[block.id] = block_cls
return _AVAILABLE_BLOCKS
__all__ = ["load_all_blocks"]
def all_subclasses(cls: type[T]) -> list[type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
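# Illustrative note (not part of the original module): all_subclasses walks the
# inheritance tree recursively, so indirect subclasses are collected as well.
# A minimal sketch:
#
#   >>> class A: ...
#   >>> class B(A): ...
#   >>> class C(B): ...
#   >>> all_subclasses(A)   # returns [B, C]: the direct and the indirect subclass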
|
import importlib
import os
import re
from pathlib import Path
from typing import Type, TypeVar
from backend.data.block import Block
# Dynamically load all modules under backend.blocks
AVAILABLE_MODULES = []
current_dir = Path(__file__).parent
modules = [
str(f.relative_to(current_dir))[:-3].replace(os.path.sep, ".")
for f in current_dir.rglob("*.py")
if f.is_file() and f.name != "__init__.py"
]
for module in modules:
if not re.match("^[a-z0-9_.]+$", module):
raise ValueError(
f"Block module {module} error: module name must be lowercase, "
"and contain only alphanumeric characters and underscores."
)
importlib.import_module(f".{module}", package=__name__)
AVAILABLE_MODULES.append(module)
# Load all Block instances from the available modules
AVAILABLE_BLOCKS: dict[str, Type[Block]] = {}
T = TypeVar("T")
def all_subclasses(cls: Type[T]) -> list[Type[T]]:
subclasses = cls.__subclasses__()
for subclass in subclasses:
subclasses += all_subclasses(subclass)
return subclasses
for block_cls in all_subclasses(Block):
name = block_cls.__name__
if block_cls.__name__.endswith("Base"):
continue
if not block_cls.__name__.endswith("Block"):
raise ValueError(
f"Block class {block_cls.__name__} does not end with 'Block', If you are creating an abstract class, please name the class with 'Base' at the end"
)
block = block_cls.create()
if not isinstance(block.id, str) or len(block.id) != 36:
raise ValueError(f"Block ID {block.name} error: {block.id} is not a valid UUID")
if block.id in AVAILABLE_BLOCKS:
raise ValueError(f"Block ID {block.name} error: {block.id} is already in use")
input_schema = block.input_schema.model_fields
output_schema = block.output_schema.model_fields
# Make sure `error` field is a string in the output schema
if "error" in output_schema and output_schema["error"].annotation is not str:
raise ValueError(
f"{block.name} `error` field in output_schema must be a string"
)
    # Make sure all fields in input_schema and output_schema are annotated and have a value
for field_name, field in [*input_schema.items(), *output_schema.items()]:
if field.annotation is None:
raise ValueError(
f"{block.name} has a field {field_name} that is not annotated"
)
if field.json_schema_extra is None:
raise ValueError(
f"{block.name} has a field {field_name} not defined as SchemaField"
)
for field in block.input_schema.model_fields.values():
if field.annotation is bool and field.default not in (True, False):
raise ValueError(f"{block.name} has a boolean field with no default value")
if block.disabled:
continue
AVAILABLE_BLOCKS[block.id] = block_cls
__all__ = ["AVAILABLE_MODULES", "AVAILABLE_BLOCKS"]
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
(1333, 608), (1333, 640), (1333, 672), (1333, 704),
(1333, 736), (1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='ConcatDataset',
datasets=[
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}}),
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args={{_base_.backend_args}})
]))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 18k
max_iter = 18000
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=3000)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=100),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[12000, 16000],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=3000))
log_processor = dict(by_epoch=False)
|
_base_ = [
'../_base_/models/faster-rcnn_r50-caffe-c4.py',
'../_base_/schedules/schedule_1x.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
(1333, 608), (1333, 640), (1333, 672), (1333, 704),
(1333, 736), (1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
_delete_=True,
type='ConcatDataset',
datasets=[
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type='VOCDataset',
data_root={{_base_.data_root}},
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
]))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 18k
max_iter = 18000
train_cfg = dict(
_delete_=True,
type='IterBasedTrainLoop',
max_iters=max_iter,
val_interval=3000)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=100),
dict(
type='MultiStepLR',
begin=0,
end=max_iter,
by_epoch=False,
milestones=[12000, 16000],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=3000))
log_processor = dict(by_epoch=False)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.data import BaseDataElement
from ..registry.root import METRICS
from .metric import BaseMetric
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
Args:
metrics (dict or BaseMetric or Sequence): The config of metrics.
"""
def __init__(self, metrics: Union[dict, BaseMetric, Sequence]):
self._dataset_meta: Optional[dict] = None
if not isinstance(metrics, Sequence):
metrics = [metrics]
self.metrics: List[BaseMetric] = []
for metric in metrics:
if isinstance(metric, BaseMetric):
self.metrics.append(metric)
elif isinstance(metric, dict):
self.metrics.append(METRICS.build(metric))
else:
raise TypeError('metric should be a dict or a BaseMetric, '
f'but got {metric}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
for metric in self.metrics:
metric.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[dict],
predictions: Sequence[BaseDataElement]):
"""Convert ``BaseDataSample`` to dict and invoke process method of each
metric.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
_data_batch = []
for data in data_batch:
if isinstance(data['data_sample'], BaseDataElement):
_data_batch.append(
dict(
inputs=data['inputs'],
data_sample=data['data_sample'].to_dict()))
else:
_data_batch.append(data)
_predictions = []
for pred in predictions:
if isinstance(pred, BaseDataElement):
_predictions.append(pred.to_dict())
else:
_predictions.append(pred)
for metric in self.metrics:
metric.process(_data_batch, _predictions)
def evaluate(self, size: int) -> dict:
"""Invoke ``evaluate`` method of each metric and collect the metrics
dictionary.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation results of all metrics. The keys are the names
of the metrics, and the values are corresponding results.
"""
metrics = {}
for metric in self.metrics:
_results = metric.evaluate(size)
# Check metric name conflicts
for name in _results.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluation results with the same '
f'metric name {name}. Please make sure all metrics '
'have different prefixes.')
metrics.update(_results)
return metrics
def offline_evaluate(self,
data: Sequence,
predictions: Sequence,
chunk_size: int = 1):
"""Offline evaluate the dumped predictions on the given data .
Args:
data (Sequence): All data of the validation set.
predictions (Sequence): All predictions of the model on the
validation set.
chunk_size (int): The number of data samples and predictions to be
processed in a batch.
"""
# support chunking iterable objects
def get_chunks(seq: Iterator, chunk_size=1):
stop = False
while not stop:
chunk = []
for _ in range(chunk_size):
try:
chunk.append(next(seq))
except StopIteration:
stop = True
break
if chunk:
yield chunk
size = 0
for data_chunk, pred_chunk in zip(
get_chunks(iter(data), chunk_size),
get_chunks(iter(predictions), chunk_size)):
size += len(data_chunk)
self.process(data_chunk, pred_chunk)
return self.evaluate(size)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Iterator, List, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from ..registry.root import METRICS
from .metric import BaseMetric
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
Args:
metrics (dict or BaseMetric or Sequence): The config of metrics.
"""
def __init__(self, metrics: Union[dict, BaseMetric, Sequence]):
self._dataset_meta: Optional[dict] = None
if not isinstance(metrics, Sequence):
metrics = [metrics]
self.metrics: List[BaseMetric] = []
for metric in metrics:
if isinstance(metric, BaseMetric):
self.metrics.append(metric)
elif isinstance(metric, dict):
self.metrics.append(METRICS.build(metric))
else:
raise TypeError('metric should be a dict or a BaseMetric, '
f'but got {metric}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
for metric in self.metrics:
metric.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[Tuple[Any, BaseDataElement]],
predictions: Sequence[BaseDataElement]):
"""Convert ``BaseDataSample`` to dict and invoke process method of each
metric.
Args:
data_batch (Sequence[Tuple[Any, BaseDataElement]]): A batch of data
from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
_data_batch = []
for input, data in data_batch:
if isinstance(data, BaseDataElement):
_data_batch.append((input, data.to_dict()))
else:
_data_batch.append((input, data))
_predictions = []
for pred in predictions:
if isinstance(pred, BaseDataElement):
_predictions.append(pred.to_dict())
else:
_predictions.append(pred)
for metric in self.metrics:
metric.process(_data_batch, _predictions)
def evaluate(self, size: int) -> dict:
"""Invoke ``evaluate`` method of each metric and collect the metrics
dictionary.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation results of all metrics. The keys are the names
of the metrics, and the values are corresponding results.
"""
metrics = {}
for metric in self.metrics:
_results = metric.evaluate(size)
# Check metric name conflicts
for name in _results.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluation results with the same '
f'metric name {name}. Please make sure all metrics '
'have different prefixes.')
metrics.update(_results)
return metrics
def offline_evaluate(self,
data: Sequence,
predictions: Sequence,
chunk_size: int = 1):
"""Offline evaluate the dumped predictions on the given data .
Args:
data (Sequence): All data of the validation set.
predictions (Sequence): All predictions of the model on the
validation set.
chunk_size (int): The number of data samples and predictions to be
processed in a batch.
"""
# support chunking iterable objects
def get_chunks(seq: Iterator, chunk_size=1):
stop = False
while not stop:
chunk = []
for _ in range(chunk_size):
try:
chunk.append(next(seq))
except StopIteration:
stop = True
break
if chunk:
yield chunk
size = 0
for data_chunk, pred_chunk in zip(
get_chunks(iter(data), chunk_size),
get_chunks(iter(predictions), chunk_size)):
size += len(data_chunk)
self.process(data_chunk, pred_chunk)
return self.evaluate(size)
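# Illustrative note (not part of the original module): offline_evaluate replays the dumped
# predictions through ``process`` chunk by chunk and finally calls ``evaluate`` with the
# accumulated sample count. With 5 samples and ``chunk_size=2`` the inner ``get_chunks``
# helper yields batches of sizes 2, 2 and 1, so ``size`` ends up as 5.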
|
"""
To better manage tools, we introduce a class called ToolImporter, which is used for importing and managing tool usage in SecGPT. We also define some helper functions for the spoke definition.
"""
from llama_index.core.tools import FunctionTool
class ToolImporter:
"""
A class to manage the importing and usage of tools in SecGPT.
Attributes:
tools (list): A list of tools.
tool_name_obj_dict (dict): A dictionary mapping tool names to tool objects.
tool_functions (dict): A dictionary mapping tool names to their functions.
"""
def __init__(self, tools, tool_specs=[]) -> None:
"""
Initialize the ToolImporter with tools and tool specifications.
Args:
tools (list): A list of tools.
tool_specs (list, optional): A list of tool specifications. Defaults to [].
"""
self.tool_functions = {}
# Load individual tools
self.tools = tools
self.tool_name_obj_dict = {tool.metadata.name: tool for tool in tools}
self.tool_functions = {
tool.metadata.name: [tool.metadata.name] for tool in tools
}
# Load tool specs
for tool_spec in tool_specs:
tool_list = tool_spec.to_tool_list()
self.tools.extend(tool_list)
spec_tool_name_obj_dict = {tool.metadata.name: tool for tool in tool_list}
self.tool_name_obj_dict.update(spec_tool_name_obj_dict)
self.tool_functions[tool_spec.__class__.__name__] = list(
spec_tool_name_obj_dict.keys()
)
def get_all_tools(self):
"""
Get the list of all tool objects.
Returns:
list: A list of all tool objects.
"""
return self.tools
def get_tool_names(self):
"""
Get the list of all available tool names.
Returns:
list: A list of available tool names.
"""
return [tool.metadata.name for tool in self.tools]
def get_collab_functions(self, tool_name=None):
"""
Get the list of available functionalities excluding the specified tool.
Args:
tool_name (str, optional): The name of the tool to exclude. Defaults to None.
Returns:
list: A list of available functionalities.
"""
if tool_name:
return [
tool.metadata.name
for tool in self.tools
if tool.metadata.name != tool_name
]
else:
return [tool.metadata.name for tool in self.tools]
def get_tool_spec(self, function):
"""
Get the specification of a specific tool function.
Args:
function (str): The name of the tool function.
Returns:
dict: The tool function's specifications.
"""
tool_obj = self.tool_name_obj_dict[function]
return tool_obj.metadata.get_parameters_dict()
def get_tool_by_name(self, tool_name):
"""
Get the tool object by its name.
Args:
tool_name (str): The name of the tool.
Returns:
FunctionTool: The tool object.
"""
return self.tool_name_obj_dict[tool_name]
def get_tool_functions(self):
"""
Get the mapping of tool functions.
Returns:
dict: A dictionary mapping tool names to their functions.
"""
return self.tool_functions
def get_tool_info(self):
"""
Get the information of all tools.
Returns:
str: A string containing the tool information.
"""
return "\n".join(
[
f"{tool.metadata.name}: {tool.metadata.description}"
for tool in self.tools
]
)
def create_function_placeholder(function_names):
"""
Create placeholders for functions.
Args:
function_names (list): A list of function names.
Returns:
list: A list of FunctionTool placeholders.
"""
func_placeholders = []
for func in function_names:
func_placeholder = FunctionTool.from_defaults(
fn=(lambda *args, **kwargs: None), name=func, description=func
)
func_placeholders.append(func_placeholder)
return func_placeholders
def create_message_spoke_tool():
"""
Create a tool for messaging between spoke_operator and spoke LLM.
Returns:
FunctionTool: The message spoke tool.
"""
def message_spoke(message: str):
return message
return FunctionTool.from_defaults(
fn=message_spoke,
name="message_spoke",
description="send message from the spoke_operator to the spoke LLM",
)
|
"""
To better manage tools, we introduce a class called ToolImporter, which is used for importing and managing tool usage in SecGPT. We also define some helper functions for the spoke definition.
"""
from llama_index.core.tools import FunctionTool
class ToolImporter:
"""
A class to manage the importing and usage of tools in SecGPT.
Attributes:
tools (list): A list of tools.
tool_name_obj_dict (dict): A dictionary mapping tool names to tool objects.
tool_functions (dict): A dictionary mapping tool names to their functions.
"""
def __init__(self, tools, tool_specs=[]) -> None:
"""
Initialize the ToolImporter with tools and tool specifications.
Args:
tools (list): A list of tools.
tool_specs (list, optional): A list of tool specifications. Defaults to [].
"""
self.tool_functions = {}
# Load individual tools
self.tools = tools
self.tool_name_obj_dict = {tool.metadata.name: tool for tool in tools}
self.tool_functions = {
tool.metadata.name: [tool.metadata.name] for tool in tools
}
# Load tool specs
for tool_spec in tool_specs:
tool_list = tool_spec.to_tool_list()
self.tools.extend(tool_list)
spec_tool_name_obj_dict = {tool.metadata.name: tool for tool in tool_list}
self.tool_name_obj_dict.update(spec_tool_name_obj_dict)
self.tool_functions[tool_spec.__class__.__name__] = list(
spec_tool_name_obj_dict.keys()
)
def get_all_tools(self):
"""
Get the list of all tool objects.
Returns:
list: A list of all tool objects.
"""
return self.tools
def get_tool_names(self):
"""
Get the list of all available tool names.
Returns:
list: A list of available tool names.
"""
return [tool.metadata.name for tool in self.tools]
def get_collab_functions(self, tool_name=None):
"""
Get the list of available functionalities excluding the specified tool.
Args:
tool_name (str, optional): The name of the tool to exclude. Defaults to None.
Returns:
list: A list of available functionalities.
"""
if tool_name:
return [
tool.metadata.name
for tool in self.tools
if tool.metadata.name != tool_name
]
else:
return [tool.metadata.name for tool in self.tools]
def get_tool_spec(self, function):
"""
Get the specification of a specific tool function.
Args:
function (str): The name of the tool function.
Returns:
dict: The tool function's specifications.
"""
tool_obj = self.tool_name_obj_dict[function]
return tool_obj.metadata.get_parameters_dict()
def get_tool_by_name(self, tool_name):
"""
Get the tool object by its name.
Args:
tool_name (str): The name of the tool.
Returns:
FunctionTool: The tool object.
"""
return self.tool_name_obj_dict[tool_name]
def get_tool_functions(self):
"""
Get the mapping of tool functions.
Returns:
dict: A dictionary mapping tool names to their functions.
"""
return self.tool_functions
def get_tool_info(self):
"""
Get the information of all tools.
Returns:
str: A string containing the tool information.
"""
return "\n".join(
[
f"{tool.metadata.name}: {tool.metadata.description}"
for tool in self.tools
]
)
def create_function_placeholder(function_names):
"""
Create placeholders for functions.
Args:
function_names (list): A list of function names.
Returns:
list: A list of FunctionTool placeholders.
"""
func_placeholders = []
for func in function_names:
func_placeholder = FunctionTool.from_defaults(
fn=(lambda *args, **kwargs: None), name=func, description=func
)
func_placeholders.append(func_placeholder)
return func_placeholders
def create_message_spoke_tool():
"""
Create a tool for messaging between spoke_operator and spoke LLM.
Returns:
FunctionTool: The message spoke tool.
"""
def message_spoke(message: str):
return message
return FunctionTool.from_defaults(
fn=message_spoke,
name="message_spoke",
description="send message from the spoke_operator to the spoke LLM",
)
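# Illustrative usage sketch (not part of the original module), assuming llama_index is
# installed; the ``add`` tool below is a made-up example:
#
#   >>> def add(a: int, b: int) -> int:
#   ...     return a + b
#   >>> add_tool = FunctionTool.from_defaults(fn=add, name="add", description="Add two integers")
#   >>> importer = ToolImporter(tools=[add_tool])
#   >>> importer.get_tool_names()
#   ['add']
#   >>> importer.get_tool_functions()
#   {'add': ['add']}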
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel,
convert_sync_batchnorm, is_model_wrapper,
revert_sync_batchnorm)
from mmengine.registry import MODEL_WRAPPERS, Registry
from mmengine.utils import is_installed
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.SyncBatchNorm(8))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_convert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.BatchNorm2d(8))
x = torch.randn(1, 3, 10, 10)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
# Test convert to mmcv SyncBatchNorm
if is_installed('mmcv'):
# MMCV SyncBatchNorm is only supported on distributed training.
with pytest.raises((RuntimeError, AssertionError)):
convert_sync_batchnorm(conv, implementation='mmcv')
# Test convert to Pytorch SyncBatchNorm
# Expect a ValueError prompting that SyncBN is not supported on CPU
converted_conv = convert_sync_batchnorm(conv)
assert isinstance(converted_conv[1], torch.nn.SyncBatchNorm)
with pytest.raises(ValueError):
converted_conv(x)
def test_is_model_wrapper():
# Test basic module wrapper.
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29510'
os.environ['RANK'] = str(0)
init_process_group(backend='gloo', rank=0, world_size=1)
model = nn.Linear(1, 1)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` can check model wrapper registered in custom
# registry.
CHILD_REGISTRY = Registry('test_is_model_wrapper', parent=MODEL_WRAPPERS)
class CustomModelWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
CHILD_REGISTRY.register_module(module=CustomModelWrapper)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel, CustomModelWrapper
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` will not check model wrapper in parent
# registry from a child registry.
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert not is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
wrapper_model = CustomModelWrapper(model)
assert is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
destroy_process_group()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import pytest
import torch
import torch.nn as nn
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DataParallel, DistributedDataParallel
from mmengine.model import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel,
is_model_wrapper, revert_sync_batchnorm)
from mmengine.registry import MODEL_WRAPPERS, Registry
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
def test_revert_syncbn():
# conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
conv = nn.Sequential(nn.Conv2d(3, 8, 2), nn.SyncBatchNorm(8))
x = torch.randn(1, 3, 10, 10)
# Expect a ValueError prompting that SyncBN is not supported on CPU
with pytest.raises(ValueError):
y = conv(x)
conv = revert_sync_batchnorm(conv)
y = conv(x)
assert y.shape == (1, 8, 9, 9)
def test_is_model_wrapper():
# Test basic module wrapper.
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29510'
os.environ['RANK'] = str(0)
init_process_group(backend='gloo', rank=0, world_size=1)
model = nn.Linear(1, 1)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` can check model wrapper registered in custom
# registry.
CHILD_REGISTRY = Registry('test_is_model_wrapper', parent=MODEL_WRAPPERS)
class CustomModelWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
CHILD_REGISTRY.register_module(module=CustomModelWrapper)
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel, CustomModelWrapper
]:
wrapper_model = wrapper(model)
assert is_model_wrapper(wrapper_model)
# Test `is_model_wrapper` will not check model wrapper in parent
# registry from a child registry.
for wrapper in [
DistributedDataParallel, MMDistributedDataParallel,
MMSeparateDistributedDataParallel, DataParallel
]:
wrapper_model = wrapper(model)
assert not is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
wrapper_model = CustomModelWrapper(model)
assert is_model_wrapper(wrapper_model, registry=CHILD_REGISTRY)
destroy_process_group()
|
from __future__ import annotations
import os
import platform
import struct
from itertools import chain
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterable
CMAKE_MINIMUM_VERSION_STRING = "3.27"
IS_WINDOWS = platform.system() == "Windows"
IS_DARWIN = platform.system() == "Darwin"
IS_LINUX = platform.system() == "Linux"
IS_64BIT = struct.calcsize("P") == 8
BUILD_DIR = "build"
def check_env_flag(name: str, default: str = "") -> bool:
return os.getenv(name, default).upper() in ["ON", "1", "YES", "TRUE", "Y"]
def check_negative_env_flag(name: str, default: str = "") -> bool:
return os.getenv(name, default).upper() in ["OFF", "0", "NO", "FALSE", "N"]
def gather_paths(env_vars: Iterable[str]) -> list[str]:
return list(chain(*(os.getenv(v, "").split(os.pathsep) for v in env_vars)))
def lib_paths_from_base(base_path: str) -> list[str]:
return [os.path.join(base_path, s) for s in ["lib/x64", "lib", "lib64"]]
# We promised that CXXFLAGS should also be affected by CFLAGS
if "CFLAGS" in os.environ and "CXXFLAGS" not in os.environ:
os.environ["CXXFLAGS"] = os.environ["CFLAGS"]
class BuildType:
"""Checks build type. The build type will be given in :attr:`cmake_build_type_env`. If :attr:`cmake_build_type_env`
is ``None``, then the build type will be inferred from ``CMakeCache.txt``. If ``CMakeCache.txt`` does not exist,
os.environ['CMAKE_BUILD_TYPE'] will be used.
Args:
cmake_build_type_env (str): The value of os.environ['CMAKE_BUILD_TYPE']. If None, the actual build type will be
inferred.
"""
def __init__(self, cmake_build_type_env: str | None = None) -> None:
if cmake_build_type_env is not None:
self.build_type_string = cmake_build_type_env
return
cmake_cache_txt = os.path.join(BUILD_DIR, "CMakeCache.txt")
if os.path.isfile(cmake_cache_txt):
# Found CMakeCache.txt. Use the build type specified in it.
from .cmake_utils import get_cmake_cache_variables_from_file
with open(cmake_cache_txt) as f:
cmake_cache_vars = get_cmake_cache_variables_from_file(f)
# Normally it is anti-pattern to determine build type from CMAKE_BUILD_TYPE because it is not used for
# multi-configuration build tools, such as Visual Studio and XCode. But since we always communicate with
# CMake using CMAKE_BUILD_TYPE from our Python scripts, this is OK here.
self.build_type_string = cast(str, cmake_cache_vars["CMAKE_BUILD_TYPE"])
else:
self.build_type_string = os.environ.get("CMAKE_BUILD_TYPE", "Release")
def is_debug(self) -> bool:
"Checks Debug build."
return self.build_type_string == "Debug"
def is_rel_with_deb_info(self) -> bool:
"Checks RelWithDebInfo build."
return self.build_type_string == "RelWithDebInfo"
def is_release(self) -> bool:
"Checks Release build."
return self.build_type_string == "Release"
# hotpatch environment variable 'CMAKE_BUILD_TYPE'. 'CMAKE_BUILD_TYPE' always prevails over DEBUG or REL_WITH_DEB_INFO.
if "CMAKE_BUILD_TYPE" not in os.environ:
if check_env_flag("DEBUG"):
os.environ["CMAKE_BUILD_TYPE"] = "Debug"
elif check_env_flag("REL_WITH_DEB_INFO"):
os.environ["CMAKE_BUILD_TYPE"] = "RelWithDebInfo"
else:
os.environ["CMAKE_BUILD_TYPE"] = "Release"
build_type = BuildType()
|
from __future__ import annotations
import os
import platform
import struct
from itertools import chain
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterable
IS_WINDOWS = platform.system() == "Windows"
IS_DARWIN = platform.system() == "Darwin"
IS_LINUX = platform.system() == "Linux"
IS_64BIT = struct.calcsize("P") == 8
BUILD_DIR = "build"
def check_env_flag(name: str, default: str = "") -> bool:
return os.getenv(name, default).upper() in ["ON", "1", "YES", "TRUE", "Y"]
def check_negative_env_flag(name: str, default: str = "") -> bool:
return os.getenv(name, default).upper() in ["OFF", "0", "NO", "FALSE", "N"]
def gather_paths(env_vars: Iterable[str]) -> list[str]:
return list(chain(*(os.getenv(v, "").split(os.pathsep) for v in env_vars)))
def lib_paths_from_base(base_path: str) -> list[str]:
return [os.path.join(base_path, s) for s in ["lib/x64", "lib", "lib64"]]
# We promised that CXXFLAGS should also be affected by CFLAGS
if "CFLAGS" in os.environ and "CXXFLAGS" not in os.environ:
os.environ["CXXFLAGS"] = os.environ["CFLAGS"]
class BuildType:
"""Checks build type. The build type will be given in :attr:`cmake_build_type_env`. If :attr:`cmake_build_type_env`
is ``None``, then the build type will be inferred from ``CMakeCache.txt``. If ``CMakeCache.txt`` does not exist,
os.environ['CMAKE_BUILD_TYPE'] will be used.
Args:
cmake_build_type_env (str): The value of os.environ['CMAKE_BUILD_TYPE']. If None, the actual build type will be
inferred.
"""
def __init__(self, cmake_build_type_env: str | None = None) -> None:
if cmake_build_type_env is not None:
self.build_type_string = cmake_build_type_env
return
cmake_cache_txt = os.path.join(BUILD_DIR, "CMakeCache.txt")
if os.path.isfile(cmake_cache_txt):
# Found CMakeCache.txt. Use the build type specified in it.
from .cmake_utils import get_cmake_cache_variables_from_file
with open(cmake_cache_txt) as f:
cmake_cache_vars = get_cmake_cache_variables_from_file(f)
# Normally it is anti-pattern to determine build type from CMAKE_BUILD_TYPE because it is not used for
# multi-configuration build tools, such as Visual Studio and XCode. But since we always communicate with
# CMake using CMAKE_BUILD_TYPE from our Python scripts, this is OK here.
self.build_type_string = cast(str, cmake_cache_vars["CMAKE_BUILD_TYPE"])
else:
self.build_type_string = os.environ.get("CMAKE_BUILD_TYPE", "Release")
def is_debug(self) -> bool:
"Checks Debug build."
return self.build_type_string == "Debug"
def is_rel_with_deb_info(self) -> bool:
"Checks RelWithDebInfo build."
return self.build_type_string == "RelWithDebInfo"
def is_release(self) -> bool:
"Checks Release build."
return self.build_type_string == "Release"
# hotpatch environment variable 'CMAKE_BUILD_TYPE'. 'CMAKE_BUILD_TYPE' always prevails over DEBUG or REL_WITH_DEB_INFO.
if "CMAKE_BUILD_TYPE" not in os.environ:
if check_env_flag("DEBUG"):
os.environ["CMAKE_BUILD_TYPE"] = "Debug"
elif check_env_flag("REL_WITH_DEB_INFO"):
os.environ["CMAKE_BUILD_TYPE"] = "RelWithDebInfo"
else:
os.environ["CMAKE_BUILD_TYPE"] = "Release"
build_type = BuildType()
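# Illustrative note (not part of the original module): the flag helpers do a
# case-insensitive membership test, and BuildType only falls back to CMakeCache.txt or
# the environment when no explicit type is given. A minimal sketch (assuming USE_FOO is
# not set in the environment):
#
#   >>> os.environ["DEBUG"] = "yes"
#   >>> check_env_flag("DEBUG")
#   True
#   >>> check_negative_env_flag("USE_FOO", default="0")
#   True
#   >>> BuildType("RelWithDebInfo").is_rel_with_deb_info()
#   True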
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(warmup_iters=1000, step=[27, 33])
runner = dict(max_epochs=36)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth' # noqa
model = dict(
type='MaskRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(warmup_iters=1000, step=[27, 33])
runner = dict(max_epochs=36)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"])
class AveragePooling1D(BasePooling):
"""Average pooling for temporal data.
Downsamples the input representation by taking the average value over the
window defined by `pool_size`. The window is shifted by `strides`. The
resulting output when using "valid" padding option has a shape of:
    `output_shape = (input_shape - pool_size + 1) / strides`
The resulting output shape when using the "same" padding option is:
`output_shape = input_shape / strides`
Args:
        pool_size: int, size of the pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
... strides=1, padding="valid")
>>> avg_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
... strides=2, padding="valid")
>>> avg_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
... strides=1, padding="same")
>>> avg_pool_1d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling1D", "keras.layers.AvgPool1D"])
class AveragePooling1D(BasePooling):
"""Average pooling for temporal data.
Downsamples the input representation by taking the average value over the
window defined by `pool_size`. The window is shifted by `strides`. The
resulting output when using "valid" padding option has a shape of:
    `output_shape = (input_shape - pool_size + 1) / strides`
The resulting output shape when using the "same" padding option is:
`output_shape = input_shape / strides`
Args:
        pool_size: int, size of the pooling window.
strides: int or None. Specifies how much the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, steps)`.
Output shape:
- If `data_format="channels_last"`:
3D tensor with shape `(batch_size, downsampled_steps, features)`.
- If `data_format="channels_first"`:
3D tensor with shape `(batch_size, features, downsampled_steps)`.
Examples:
`strides=1` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
... strides=1, padding="valid")
>>> avg_pool_1d(x)
`strides=2` and `padding="valid"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
... strides=2, padding="valid")
>>> avg_pool_1d(x)
`strides=1` and `padding="same"`:
>>> x = np.array([1., 2., 3., 4., 5.])
>>> x = np.reshape(x, [1, 5, 1])
>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,
... strides=1, padding="same")
>>> avg_pool_1d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=1,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
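# Illustrative note (not part of the original layer): for the docstring examples above,
# with input [1., 2., 3., 4., 5.], the expected pooled values are, sketching the
# arithmetic only:
#   pool_size=2, strides=1, padding="valid" -> [1.5, 2.5, 3.5, 4.5]
#   pool_size=2, strides=2, padding="valid" -> [1.5, 3.5]
# each value being the mean of one length-2 window.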
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Callable
import numpy as np
from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
if TYPE_CHECKING:
from torch import Tensor
from sentence_transformers.evaluation import SimilarityFunction
from sentence_transformers.evaluation.NanoBEIREvaluator import (
DatasetNameType,
)
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseNanoBEIREvaluator(NanoBEIREvaluator):
def __init__(
self,
dataset_names: list[DatasetNameType] | None = None,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
write_csv: bool = True,
truncate_dim: int | None = None,
score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] = None,
main_score_function: str | SimilarityFunction | None = None,
aggregate_fn: Callable[[list[float]], float] = np.mean,
aggregate_key: str = "mean",
query_prompts: str | dict[str, str] | None = None,
corpus_prompts: str | dict[str, str] | None = None,
):
self.information_retrieval_class = SparseInformationRetrievalEvaluator
super().__init__(
dataset_names=dataset_names,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
aggregate_fn=aggregate_fn,
aggregate_key=aggregate_key,
query_prompts=query_prompts,
corpus_prompts=corpus_prompts,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def _load_dataset(
self, dataset_name: DatasetNameType, **ir_evaluator_kwargs
) -> SparseInformationRetrievalEvaluator:
return super()._load_dataset(dataset_name, **ir_evaluator_kwargs)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Callable
import numpy as np
from sentence_transformers.evaluation.NanoBEIREvaluator import NanoBEIREvaluator
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
if TYPE_CHECKING:
from torch import Tensor
from sentence_transformers.evaluation import SimilarityFunction
from sentence_transformers.evaluation.NanoBEIREvaluator import (
DatasetNameType,
)
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseNanoBEIREvaluator(NanoBEIREvaluator):
def __init__(
self,
dataset_names: list[DatasetNameType] | None = None,
mrr_at_k: list[int] = [10],
ndcg_at_k: list[int] = [10],
accuracy_at_k: list[int] = [1, 3, 5, 10],
precision_recall_at_k: list[int] = [1, 3, 5, 10],
map_at_k: list[int] = [100],
show_progress_bar: bool = False,
batch_size: int = 32,
write_csv: bool = True,
truncate_dim: int | None = None,
        score_functions: dict[str, Callable[[Tensor, Tensor], Tensor]] | None = None,
main_score_function: str | SimilarityFunction | None = None,
aggregate_fn: Callable[[list[float]], float] = np.mean,
aggregate_key: str = "mean",
query_prompts: str | dict[str, str] | None = None,
corpus_prompts: str | dict[str, str] | None = None,
):
super().__init__(
dataset_names=dataset_names,
mrr_at_k=mrr_at_k,
ndcg_at_k=ndcg_at_k,
accuracy_at_k=accuracy_at_k,
precision_recall_at_k=precision_recall_at_k,
map_at_k=map_at_k,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
write_csv=write_csv,
truncate_dim=truncate_dim,
score_functions=score_functions,
main_score_function=main_score_function,
aggregate_fn=aggregate_fn,
aggregate_key=aggregate_key,
query_prompts=query_prompts,
corpus_prompts=corpus_prompts,
)
self.information_retrieval_class = SparseInformationRetrievalEvaluator
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1, *args, **kwargs
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps, *args, **kwargs)
def _load_dataset(
self, dataset_name: DatasetNameType, **ir_evaluator_kwargs
) -> SparseInformationRetrievalEvaluator:
return super()._load_dataset(dataset_name, **ir_evaluator_kwargs)
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Sparsity Stats Query : Row Non-Zero Mean: 62.97999954223633, Row Sparsity Mean: 0.9979365468025208
Model Sparsity Stats Corpus : Row Non-Zero Mean: 63.39932632446289, Row Sparsity Mean: 0.9979228377342224
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Sparsity Stats Query : Row Non-Zero Mean: 48.08000183105469, Row Sparsity Mean: 0.9984247088432312
Model Sparsity Stats Corpus : Row Non-Zero Mean: 125.3604965209961, Row Sparsity Mean: 0.9958928227424622
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Sparsity Stats Query : Row Non-Zero Mean: 55.53000068664551, Row Sparsity Mean: 0.998180627822876
Model Sparsity Stats Corpus : Row Non-Zero Mean: 94.37991142272949, Row Sparsity Mean: 0.9969078302383423
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for some NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=["QuoraRetrieval", "MSMARCO"],
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseDenseHead(BaseModule, metaclass=ABCMeta):
"""Base class for DenseHeads."""
def __init__(self, init_cfg=None):
super(BaseDenseHead, self).__init__(init_cfg)
@abstractmethod
def loss(self, **kwargs):
"""Compute losses of the head."""
pass
@abstractmethod
def get_bboxes(self, **kwargs):
"""Transform network output for a batch into bbox predictions."""
pass
def forward_train(self,
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=None,
proposal_cfg=None,
**kwargs):
"""
Args:
x (list[Tensor]): Features from FPN.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
            proposal_cfg (mmcv.Config): Test / postprocessing configuration;
                if None, test_cfg would be used.
Returns:
tuple:
                losses (dict[str, Tensor]): A dictionary of loss components.
proposal_list (list[Tensor]): Proposals of each image.
"""
outs = self(x)
if gt_labels is None:
loss_inputs = outs + (gt_bboxes, img_metas)
else:
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
if proposal_cfg is None:
return losses
else:
proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
return losses, proposal_list
def simple_test(self, feats, img_metas, rescale=False):
"""Test function without test-time augmentation.
Args:
feats (tuple[torch.Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.
                The first item is ``bboxes`` with shape (n, 5),
                where the 5 columns represent (tl_x, tl_y, br_x, br_y, score).
                The second item is ``labels`` with shape (n,).
"""
return self.simple_test_bboxes(feats, img_metas, rescale=rescale)
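# A minimal subclass sketch (hypothetical names, assuming only the contract documented
# above, with gt_labels provided): a concrete head implements forward(), loss() and
# get_bboxes(), which is all that forward_train() and the test path rely on.
class ExampleDenseHead(BaseDenseHead):

    def forward(self, feats):
        # Pretend the raw FPN features are the head predictions.
        return (list(feats), )

    def loss(self, preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
        # forward_train() expects a dict of named loss tensors.
        return dict(loss_example=sum(p.sum() for p in preds) * 0)

    def get_bboxes(self, preds, img_metas, cfg=None):
        # One (bboxes, labels) pair per image; empty placeholders for illustration.
        return [(None, None) for _ in img_metas]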
|
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
class BaseDenseHead(BaseModule, metaclass=ABCMeta):
"""Base class for DenseHeads."""
def __init__(self, init_cfg=None):
super(BaseDenseHead, self).__init__(init_cfg)
@abstractmethod
def loss(self, **kwargs):
"""Compute losses of the head."""
pass
@abstractmethod
def get_bboxes(self, **kwargs):
"""Transform network output for a batch into bbox predictions."""
pass
def forward_train(self,
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=None,
proposal_cfg=None,
**kwargs):
"""
Args:
x (list[Tensor]): Features from FPN.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
            proposal_cfg (mmcv.Config): Test / postprocessing configuration;
                if None, test_cfg would be used.
Returns:
tuple:
                losses (dict[str, Tensor]): A dictionary of loss components.
proposal_list (list[Tensor]): Proposals of each image.
"""
outs = self(x)
if gt_labels is None:
loss_inputs = outs + (gt_bboxes, img_metas)
else:
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
if proposal_cfg is None:
return losses
else:
proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
return losses, proposal_list
def simple_test(self, feats, img_metas, rescale=False):
"""Test function without test-time augmentation.
Args:
feats (tuple[torch.Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
            list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.
                The first item is ``bboxes`` with shape (n, 5),
                where the 5 columns represent (tl_x, tl_y, br_x, br_y, score).
                The second item is ``labels`` with shape (n,).
"""
return self.simple_test_bboxes(feats, img_metas, rescale=rescale)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine"  # The model was trained with dot-product similarity, but the STSB scores are cosine similarities, so we evaluate with cosine
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Dot-Similarity : Pearson: 0.7513 Spearman: 0.8010
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_dot
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8010
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When used from Python, the following values are additionally supported:
- a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
        The customized Python modules that need to be imported before loading the executor.
        Note that the recommended way is to only import a single module: a simple Python file if your
        executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files
        that should be structured as a Python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
mixin_base_runtime_parser(gp)
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When used from Python, the following values are additionally supported:
- a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
        The customized Python modules that need to be imported before loading the executor.
        Note that the recommended way is to only import a single module: a simple Python file if your
        executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files
        that should be structured as a Python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
gp.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
        help='The port for input data to bind to; defaults to a random port in the range [49152, 65535]',
)
gp.add_argument(
'--host-in',
type=str,
default=__default_host__,
        help=f'The host address to bind to; by default it is {__default_host__}',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
        The type of array that `tensor` and `embedding` will be serialized to.
        Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
        `here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`__.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
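# A minimal sketch of how this mixin might be exercised (hypothetical driver code,
# assuming a plain argparse parser is acceptable here): it shows how repeated
# `--uses-with` KEY: VALUE pairs are collected by KVAppendAction.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    mixin_worker_runtime_parser(parser)
    args = parser.parse_args(
        ['--uses', 'executor.yml', '--uses-with', 'foo: bar', '--uses-with', 'workers: 2']
    )
    print(args.uses)       # executor.yml
    print(args.uses_with)  # expected to be a dict, e.g. {'foo': 'bar', 'workers': 2}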
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.43"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.42"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
import logging
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: guard your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Create a large list of 100k sentences
sentences = [f"This is sentence {i}" for i in range(100000)]
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
emb = model.encode(sentences, pool=pool)
print("Embeddings computed. Shape:", emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
import logging
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: guard your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Create a large list of 100k sentences
sentences = [f"This is sentence {i}" for i in range(100000)]
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
emb = model.encode_multi_process(sentences, pool)
print("Embeddings computed. Shape:", emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
_base_ = './lsj-100e_coco-instance.py'
# 8 dataset repeats x 25 epochs = 200 epochs
train_dataloader = dict(dataset=dict(times=8))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
|
_base_ = './lsj_100e_coco_instance.py'
# 8 dataset repeats x 25 epochs = 200 epochs
train_dataloader = dict(dataset=dict(times=8))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
|