# Copyright (c) OpenMMLab. All rights reserved.
from .det_inferencer import DetInferencer
from .inference import (async_inference_detector, inference_detector,
inference_mot, init_detector, init_track_model)
__all__ = [
'init_detector', 'async_inference_detector', 'inference_detector',
'DetInferencer', 'inference_mot', 'init_track_model'
]
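# Typical usage sketch (hypothetical config and checkpoint paths, for
# illustration only; not part of the original file):
#
#   from mmdet.apis import init_detector, inference_detector
#   model = init_detector('path/to/config.py', 'path/to/checkpoint.pth',
#                         device='cuda:0')
#   result = inference_detector(model, 'path/to/image.jpg')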
|
# Copyright (c) OpenMMLab. All rights reserved.
from .det_inferencer import DetInferencer
from .inference import (async_inference_detector, inference_detector,
init_detector)
__all__ = [
'init_detector', 'async_inference_detector', 'inference_detector',
'DetInferencer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.models.utils.misc import get_box_tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps
def cast_tensor_type(x, scale=1., dtype=None):
if dtype == 'fp16':
# scale is for preventing overflows
x = (x / scale).half()
return x
@TASK_UTILS.register_module()
class BboxOverlaps2D:
"""2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
def __init__(self, scale=1., dtype=None):
self.scale = scale
self.dtype = dtype
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes.
Args:
bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)
in <x1, y1, x2, y2> format, or shape (m, 5) in <x1, y1, x2,
y2, score> format.
bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)
in <x1, y1, x2, y2> format, shape (m, 5) in <x1, y1, x2, y2,
score> format, or be empty. If ``is_aligned`` is ``True``,
then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection
over foreground), or "giou" (generalized intersection over
union).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
bboxes1 = get_box_tensor(bboxes1)
bboxes2 = get_box_tensor(bboxes2)
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., :4]
if bboxes1.size(-1) == 5:
bboxes1 = bboxes1[..., :4]
if self.dtype == 'fp16':
# cast the tensors to fp16 to save CPU and CUDA memory while keeping speed
bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
if not overlaps.is_cuda and overlaps.dtype == torch.float16:
# cast CPU results back to float32
overlaps = overlaps.float()
return overlaps
return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
def __repr__(self):
"""str: a string describing the module"""
repr_str = f'{self.__class__.__name__}(scale={self.scale}, dtype={self.dtype})'
return repr_str
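# Usage sketch (illustrative values, not part of the original file):
if __name__ == '__main__':
    calculator = BboxOverlaps2D()
    bboxes1 = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    bboxes2 = torch.tensor([[0., 0., 10., 10.]])
    # is_aligned defaults to False, so the result is an (m, n) = (2, 1) matrix
    ious = calculator(bboxes1, bboxes2, mode='iou')
    print(ious.shape)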
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps
def cast_tensor_type(x, scale=1., dtype=None):
if dtype == 'fp16':
# scale is for preventing overflows
x = (x / scale).half()
return x
@TASK_UTILS.register_module()
class BboxOverlaps2D:
"""2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
def __init__(self, scale=1., dtype=None):
self.scale = scale
self.dtype = dtype
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes.
Args:
bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
bboxes2 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
format, shape (m, 5) in <x1, y1, x2, y2, score> format, or be
empty. If ``is_aligned`` is ``True``, then m and n must be
equal.
mode (str): "iou" (intersection over union), "iof" (intersection
over foreground), or "giou" (generalized intersection over
union).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., :4]
if bboxes1.size(-1) == 5:
bboxes1 = bboxes1[..., :4]
if self.dtype == 'fp16':
# cast the tensors to fp16 to save CPU and CUDA memory while keeping speed
bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
if not overlaps.is_cuda and overlaps.dtype == torch.float16:
# cast CPU results back to float32
overlaps = overlaps.float()
return overlaps
return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
def __repr__(self):
"""str: a string describing the module"""
repr_str = f'{self.__class__.__name__}(scale={self.scale}, dtype={self.dtype})'
return repr_str
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
CosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SentenceTransformer model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identity function is used (i.e. no change).
References:
- `Training Examples > Semantic Textual Similarity <../../examples/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`CoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, CoSENTLoss is recommended.
- :class:`AnglELoss` is :class:`CoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than CosineSimilarityLoss.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CosineSimilarityLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = loss_fct
self.cos_score_transformation = cos_score_transformation
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
output = self.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
return self.loss_fct(output, labels.float().view(-1))
def get_config_dict(self) -> dict[str, Any]:
return {"loss_fct": fullname(self.loss_fct)}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing_utils import (ConfigType, InstanceList, MultiConfig,
OptConfigType, OptInstanceList, OptMultiConfig,
OptPixelList, PixelList, RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg'
]
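# e.g. AvoidCUDAOOM can wrap a function so it retries when a CUDA OOM occurs
# (sketch with hypothetical function and inputs):
#
#   from mmdet.utils import AvoidCUDAOOM
#   output = AvoidCUDAOOM.retry_if_cuda_oom(some_function)(input1, input2)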
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg'
]
|
import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from ..utils.deprecation_utils import deprecated
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
@deprecated(
"This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem('file')` or [`datasets.filesystems.S3FileSystem`].
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
# LocalFileSystem.mv does copy + rm, so it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
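# Usage sketch with fsspec's built-in in-memory filesystem (illustrative
# paths; not part of the original file):
if __name__ == "__main__":
    fs = fsspec.filesystem("memory")
    assert is_remote_filesystem(fs)  # anything but LocalFileSystem counts as remote
    fs.makedirs("bucket/train", exist_ok=True)
    with fs.open("bucket/train/data.txt", "w") as f:
        f.write("hello")
    rename(fs, "memory://bucket/train/data.txt", "memory://bucket/train/renamed.txt")
    assert extract_path_from_uri("s3://my-bucket/dataset/train") == "my-bucket/dataset/train"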
|
import importlib
import shutil
import warnings
from typing import List
import fsspec
import fsspec.asyn
from fsspec.implementations.local import LocalFileSystem
from ..utils.deprecation_utils import deprecated
from . import compression
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
@deprecated(
"This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
)
def extract_path_from_uri(dataset_path: str) -> str:
"""
Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
Checks if `fs` is a remote filesystem.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem('file')` or [`datasets.filesystems.S3FileSystem`].
"""
return not isinstance(fs, LocalFileSystem)
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
if not is_remote_filesystem(fs):
# LocalFileSystem.mv does copy + rm, so it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.3.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
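# Typical entry point exposed by this package (sketch):
#
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train")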
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.3.2"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright (c) OpenMMLab. All rights reserved.
import re
from mmengine.config import Config
def replace_cfg_vals(ori_cfg):
"""Replace the string "${key}" with the corresponding value.
Replace the "${key}" with the value of ori_cfg.key in the config. And
support replacing the chained ${key}. Such as, replace "${key0.key1}"
with the value of cfg.key0.key1. Code is modified from `vars.py
< https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501
Args:
ori_cfg (mmengine.config.Config):
The original config with "${key}", generated from a file.
Returns:
updated_cfg (mmengine.config.Config):
The config with "${key}" replaced by the corresponding value.
"""
def get_value(cfg, key):
for k in key.split('.'):
cfg = cfg[k]
return cfg
def replace_value(cfg):
if isinstance(cfg, dict):
return {key: replace_value(value) for key, value in cfg.items()}
elif isinstance(cfg, list):
return [replace_value(item) for item in cfg]
elif isinstance(cfg, tuple):
return tuple([replace_value(item) for item in cfg])
elif isinstance(cfg, str):
# the format of string cfg may be:
# 1) "${key}", which will be replaced with cfg.key directly
# 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx",
# which will be replaced with the string of the cfg.key
keys = pattern_key.findall(cfg)
values = [get_value(ori_cfg, key[2:-1]) for key in keys]
if len(keys) == 1 and keys[0] == cfg:
# the format of string cfg is "${key}"
cfg = values[0]
else:
for key, value in zip(keys, values):
# the format of string cfg is
# "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx"
assert not isinstance(value, (dict, list, tuple)), \
f'when the format of string cfg is ' \
f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \
f"the type of the value of '${key}' " \
f'can not be dict, list, or tuple, ' \
f'but you input {type(value)} in {cfg}'
cfg = cfg.replace(key, str(value))
return cfg
else:
return cfg
# the pattern of string "${key}"
pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}')
# the type of ori_cfg._cfg_dict is mmengine.config.ConfigDict
updated_cfg = Config(
replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename)
# replace the model with model_wrapper
if updated_cfg.get('model_wrapper', None) is not None:
updated_cfg.model = updated_cfg.model_wrapper
updated_cfg.pop('model_wrapper')
return updated_cfg
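# Usage sketch (hypothetical config values, for illustration only):
if __name__ == '__main__':
    cfg = Config(
        dict(
            work_dir='work_dirs/demo',
            log_file='${work_dir}/run.log',
            data=dict(root='${work_dir}/data')))
    cfg = replace_cfg_vals(cfg)
    print(cfg.log_file)  # -> work_dirs/demo/run.log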
|
# Copyright (c) OpenMMLab. All rights reserved.
import re
from mmcv.utils import Config
def replace_cfg_vals(ori_cfg):
"""Replace the string "${key}" with the corresponding value.
Replace the "${key}" with the value of ori_cfg.key in the config. And
support replacing the chained ${key}. Such as, replace "${key0.key1}"
with the value of cfg.key0.key1. Code is modified from `vars.py
< https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501
Args:
ori_cfg (mmcv.utils.config.Config):
The original config with "${key}", generated from a file.
Returns:
updated_cfg (mmcv.utils.config.Config):
The config with "${key}" replaced by the corresponding value.
"""
def get_value(cfg, key):
for k in key.split('.'):
cfg = cfg[k]
return cfg
def replace_value(cfg):
if isinstance(cfg, dict):
return {key: replace_value(value) for key, value in cfg.items()}
elif isinstance(cfg, list):
return [replace_value(item) for item in cfg]
elif isinstance(cfg, tuple):
return tuple([replace_value(item) for item in cfg])
elif isinstance(cfg, str):
# the format of string cfg may be:
# 1) "${key}", which will be replaced with cfg.key directly
# 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx",
# which will be replaced with the string of the cfg.key
keys = pattern_key.findall(cfg)
values = [get_value(ori_cfg, key[2:-1]) for key in keys]
if len(keys) == 1 and keys[0] == cfg:
# the format of string cfg is "${key}"
cfg = values[0]
else:
for key, value in zip(keys, values):
# the format of string cfg is
# "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx"
assert not isinstance(value, (dict, list, tuple)), \
f'when the format of string cfg is ' \
f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \
f"the type of the value of '${key}' " \
f'can not be dict, list, or tuple, ' \
f'but you input {type(value)} in {cfg}'
cfg = cfg.replace(key, str(value))
return cfg
else:
return cfg
# the pattern of string "${key}"
pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}')
# the type of ori_cfg._cfg_dict is mmcv.utils.config.ConfigDict
updated_cfg = Config(
replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename)
# replace the model with model_wrapper
if updated_cfg.get('model_wrapper', None) is not None:
updated_cfg.model = updated_cfg.model_wrapper
updated_cfg.pop('model_wrapper')
return updated_cfg
|
"""
Empty index.
An index that doesn't contain any documents. Can only be used for
pure LLM calls.
"""
from typing import Any, Dict, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.data_structs.data_structs import EmptyIndexStruct
from llama_index.core.indices.base import BaseIndex
from llama_index.core.llms.utils import LLMType
from llama_index.core.schema import BaseNode
from llama_index.core.storage.docstore.types import RefDocInfo
class EmptyIndex(BaseIndex[EmptyIndexStruct]):
"""
Empty Index.
An index that doesn't contain any documents. Used for
pure LLM calls.
NOTE: this exists because an empty index still allows certain properties,
such as the ability to be composed with other indices, plus token
counting and more.
"""
index_struct_cls = EmptyIndexStruct
def __init__(
self,
index_struct: Optional[EmptyIndexStruct] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(
nodes=None,
index_struct=index_struct or EmptyIndexStruct(),
**kwargs,
)
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.empty.retrievers import EmptyIndexRetriever
return EmptyIndexRetriever(self)
def as_query_engine(
self, llm: Optional[LLMType] = None, **kwargs: Any
) -> BaseQueryEngine:
if "response_mode" not in kwargs:
kwargs["response_mode"] = "generation"
else:
if kwargs["response_mode"] != "generation":
raise ValueError("EmptyIndex only supports response_mode=generation.")
return super().as_query_engine(llm=llm, **kwargs)
def _build_index_from_nodes(
self, nodes: Sequence[BaseNode], **build_kwargs: Any
) -> EmptyIndexStruct:
"""
Build the (empty) index struct; any provided nodes are ignored.
Args:
nodes (Sequence[BaseNode]): Unused for an empty index.
Returns:
EmptyIndexStruct: The created empty index struct.
"""
del nodes # Unused
return EmptyIndexStruct()
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
del nodes # Unused
raise NotImplementedError("Cannot insert into an empty index.")
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Cannot delete from an empty index.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
raise NotImplementedError("ref_doc_info not supported for an empty index.")
# legacy
GPTEmptyIndex = EmptyIndex
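# Usage sketch (assumes an LLM is configured for llama_index; illustrative):
#
#   index = EmptyIndex()
#   query_engine = index.as_query_engine()  # response_mode defaults to "generation"
#   response = query_engine.query("Tell me a joke.")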
|
"""Empty index.
An index that doesn't contain any documents. Can only be used for
pure LLM calls.
"""
from typing import Any, Dict, Optional, Sequence
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.data_structs.data_structs import EmptyIndexStruct
from llama_index.core.indices.base import BaseIndex
from llama_index.core.llms.utils import LLMType
from llama_index.core.schema import BaseNode
from llama_index.core.storage.docstore.types import RefDocInfo
class EmptyIndex(BaseIndex[EmptyIndexStruct]):
"""Empty Index.
An index that doesn't contain any documents. Used for
pure LLM calls.
NOTE: this exists because an empty index still allows certain properties,
such as the ability to be composed with other indices, plus token
counting and more.
"""
index_struct_cls = EmptyIndexStruct
def __init__(
self,
index_struct: Optional[EmptyIndexStruct] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(
nodes=None,
index_struct=index_struct or EmptyIndexStruct(),
**kwargs,
)
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.empty.retrievers import EmptyIndexRetriever
return EmptyIndexRetriever(self)
def as_query_engine(
self, llm: Optional[LLMType] = None, **kwargs: Any
) -> BaseQueryEngine:
if "response_mode" not in kwargs:
kwargs["response_mode"] = "generation"
else:
if kwargs["response_mode"] != "generation":
raise ValueError("EmptyIndex only supports response_mode=generation.")
return super().as_query_engine(llm=llm, **kwargs)
def _build_index_from_nodes(
self, nodes: Sequence[BaseNode], **build_kwargs: Any
) -> EmptyIndexStruct:
"""Build the index from documents.
Args:
documents (List[BaseDocument]): A list of documents.
Returns:
IndexList: The created summary index.
"""
del nodes # Unused
return EmptyIndexStruct()
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
del nodes # Unused
raise NotImplementedError("Cannot insert into an empty index.")
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Cannot delete from an empty index.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
raise NotImplementedError("ref_doc_info not supported for an empty index.")
# legacy
GPTEmptyIndex = EmptyIndex
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from mmdet.registry import MODELS
@MODELS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
It is quite similar to the FCOS head, except for the searched structure of
the classification and bbox regression branches, where a structure of
"dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
"""
def __init__(self, *args, init_cfg=None, **kwargs):
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
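# Construction sketch (illustrative arguments following the inherited
# FCOSHead signature; not part of the original file):
#
#   head = NASFCOSHead(
#       num_classes=80,
#       in_channels=256,
#       feat_channels=256,
#       norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))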
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
It is quite similar to the FCOS head, except for the searched structure of
the classification and bbox regression branches, where a structure of
"dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
"""
def __init__(self, *args, init_cfg=None, **kwargs):
if init_cfg is None:
init_cfg = [
dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),
dict(
type='Normal',
std=0.01,
override=[
dict(name='conv_reg'),
dict(name='conv_centerness'),
dict(
name='conv_cls',
type='Normal',
std=0.01,
bias_prob=0.01)
]),
]
super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
|
import random
import pytest
from jina import Document, DocumentArray
from lightgbm_ranker import LightGBMRanker
NUM_DOCS = 1000
NUM_MATCHES = 5
@pytest.fixture
def ranker():
return LightGBMRanker(
query_features=['brand_query', 'price_query'],
match_features=['brand_match', 'price_match'],
relevance_label='relevance',
)
@pytest.fixture
def ranker_with_categorical_features():
return LightGBMRanker(
query_features=['price_query'],
match_features=['price_match'],
relevance_label='relevance',
categorical_query_features=['brand_query'],
categorical_match_features=['brand_match'],
)
@pytest.fixture
def documents_to_train_price_sensitive_model():
"""features: color, brand, price. Label relevance"""
# price sensitive, relevance based on pure price, cheaper relevance higher.
da = DocumentArray()
for _ in range(NUM_DOCS):
root = Document(
tags={'price_query': random.randint(200, 500), 'brand_query': 1}
)
for _ in range(NUM_MATCHES):
root_price = root.tags['price_query']
root.matches.extend(
[
Document(
tags={
'price_match': root_price - 100,
'brand_match': 3,
'relevance': 10,
}
),
Document(
tags={
'price_match': root_price,
'brand_match': 3,
'relevance': 6,
}
),
Document(
tags={
'price_match': root_price + 100,
'brand_match': 3,
'relevance': 4,
}
),
Document(
tags={
'price_match': root_price + 200,
'brand_match': 3,
'relevance': 2,
}
),
]
)
da.append(root)
return da
@pytest.fixture
def documents_random_brand():
"""features: color, brand, price. Label relevance"""
# expect price
da = DocumentArray()
d1 = Document(tags={'brand_query': 2, 'price_query': 200})
d1.matches.append(
Document(id=1, tags={'brand_match': 2, 'price_match': 405, 'relevance': 3})
)
d1.matches.append(
Document(id=2, tags={'brand_match': 2, 'price_match': 305, 'relevance': 3})
)
d1.matches.append(
Document(id=3, tags={'brand_match': 2, 'price_match': 96, 'relevance': 3})
)
d1.matches.append(
Document(id=4, tags={'brand_match': 2, 'price_match': 200, 'relevance': 3})
)
da.append(d1)
return da
|
import random
import pytest
from jina import Document, DocumentArray
from ..lightgbm_ranker import LightGBMRanker
NUM_DOCS = 1000
NUM_MATCHES = 5
@pytest.fixture
def ranker():
return LightGBMRanker(
query_features=['brand_query', 'price_query'],
match_features=['brand_match', 'price_match'],
relevance_label='relevance',
)
@pytest.fixture
def ranker_with_categorical_features():
return LightGBMRanker(
query_features=['price_query'],
match_features=['price_match'],
relevance_label='relevance',
categorical_query_features=['brand_query'],
categorical_match_features=['brand_match'],
)
@pytest.fixture
def documents_to_train_price_sensitive_model():
"""features: color, brand, price. Label relevance"""
# price sensitive, relevance based on pure price, cheaper relevance higher.
da = DocumentArray()
for _ in range(NUM_DOCS):
root = Document(
tags={'price_query': random.randint(200, 500), 'brand_query': 1}
)
for _ in range(NUM_MATCHES):
root_price = root.tags['price_query']
root.matches.extend(
[
Document(
tags={
'price_match': root_price - 100,
'brand_match': 3,
'relevance': 10,
}
),
Document(
tags={
'price_match': root_price,
'brand_match': 3,
'relevance': 6,
}
),
Document(
tags={
'price_match': root_price + 100,
'brand_match': 3,
'relevance': 4,
}
),
Document(
tags={
'price_match': root_price + 200,
'brand_match': 3,
'relevance': 2,
}
),
]
)
da.append(root)
return da
@pytest.fixture
def documents_random_brand():
"""features: color, brand, price. Label relevance"""
# expect price
da = DocumentArray()
d1 = Document(tags={'brand_query': 2, 'price_query': 200})
d1.matches.append(
Document(id=1, tags={'brand_match': 2, 'price_match': 405, 'relevance': 3})
)
d1.matches.append(
Document(id=2, tags={'brand_match': 2, 'price_match': 305, 'relevance': 3})
)
d1.matches.append(
Document(id=3, tags={'brand_match': 2, 'price_match': 96, 'relevance': 3})
)
d1.matches.append(
Document(id=4, tags={'brand_match': 2, 'price_match': 200, 'relevance': 3})
)
da.append(d1)
return da
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better and more stable performance, initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 sample per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better and more stable performance, initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEF1Evaluator, CESoftmaxAccuracyEvaluator
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# As our dataset, we use SNLI + MultiNLI
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
# Read the AllNLI.tsv.gz file and create the training dataset
logger.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row["label"]]
if row["split"] == "train":
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
else:
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
model = CrossEncoder("distilroberta-base", num_labels=len(label2int))
# We wrap train_samples, which is a list of InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# During training, we use CESoftmaxAccuracyEvaluator and CEF1Evaluator to measure the performance on the dev set
accuracy_evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name="AllNLI-dev")
f1_evaluator = CEF1Evaluator.from_input_examples(dev_samples, name="AllNLI-dev")
evaluator = SequentialEvaluator([accuracy_evaluator, f1_evaluator])
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
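# Sanity-check sketch after training: score a couple of sentence pairs with
# the trained cross-encoder (illustrative sentences; `predict` returns one
# row of three logits per pair, ordered contradiction / entailment / neutral).
scores = model.predict(
    [
        ("A man is eating pizza", "A man eats something"),
        ("A man is eating pizza", "The girl is carrying a baby"),
    ]
)
logger.info("Prediction logits: {}".format(scores))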
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEF1Evaluator, CESoftmaxAccuracyEvaluator
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# As our dataset, we use SNLI + MultiNLI
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
# Read the AllNLI.tsv.gz file and create the training dataset
logger.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row["label"]]
if row["split"] == "train":
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
else:
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
model = CrossEncoder("distilroberta-base", num_labels=len(label2int))
# We wrap train_samples, which is a list of InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# During training, we use CESoftmaxAccuracyEvaluator and CEF1Evaluator to measure the performance on the dev set
accuracy_evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name="AllNLI-dev")
f1_evaluator = CEF1Evaluator.from_input_examples(dev_samples, name="AllNLI-dev")
evaluator = SequentialEvaluator([accuracy_evaluator, f1_evaluator])
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
|
import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
def get_mock_dataset(root_dir):
"""
root_dir: directory of the mocked dataset
"""
mocked_samples = []
mocked_training = []
mocked_validation = []
mocked_testing = []
sample_rate = 22050
seed = 0
for genre in gtzan.gtzan_genres:
base_dir = os.path.join(root_dir, "genres", genre)
os.makedirs(base_dir, exist_ok=True)
for i in range(100):
filename = f"{genre}.{i:05d}"
path = os.path.join(base_dir, f"{filename}.wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="int16", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, genre)
mocked_samples.append(sample)
if filename in gtzan.filtered_test:
mocked_testing.append(sample)
if filename in gtzan.filtered_train:
mocked_training.append(sample)
if filename in gtzan.filtered_valid:
mocked_validation.append(sample)
seed += 1
return (mocked_samples, mocked_training, mocked_validation, mocked_testing)
class TestGTZAN(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
training = []
validation = []
testing = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
mocked_data = get_mock_dataset(cls.root_dir)
cls.samples = mocked_data[0]
cls.training = mocked_data[1]
cls.validation = mocked_data[2]
cls.testing = mocked_data[3]
def test_no_subset(self):
dataset = gtzan.GTZAN(self.root_dir)
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert label == self.samples[i][2]
n_ite += 1
assert n_ite == len(self.samples)
def _test_training(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.training[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.training[i][1]
assert label == self.training[i][2]
n_ite += 1
assert n_ite == len(self.training)
def _test_validation(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.validation[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.validation[i][1]
assert label == self.validation[i][2]
n_ite += 1
assert n_ite == len(self.validation)
def _test_testing(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.testing[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.testing[i][1]
assert label == self.testing[i][2]
n_ite += 1
assert n_ite == len(self.testing)
def test_training_str(self):
train_dataset = gtzan.GTZAN(self.root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_str(self):
val_dataset = gtzan.GTZAN(self.root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_str(self):
test_dataset = gtzan.GTZAN(self.root_dir, subset="testing")
self._test_testing(test_dataset)
def test_training_path(self):
root_dir = Path(self.root_dir)
train_dataset = gtzan.GTZAN(root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_path(self):
root_dir = Path(self.root_dir)
val_dataset = gtzan.GTZAN(root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_path(self):
root_dir = Path(self.root_dir)
test_dataset = gtzan.GTZAN(root_dir, subset="testing")
self._test_testing(test_dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_dataset(root_dir):
"""
root_dir: directory of the mocked dataset
"""
mocked_samples = []
mocked_training = []
mocked_validation = []
mocked_testing = []
sample_rate = 22050
seed = 0
for genre in gtzan.gtzan_genres:
base_dir = os.path.join(root_dir, "genres", genre)
os.makedirs(base_dir, exist_ok=True)
for i in range(100):
filename = f"{genre}.{i:05d}"
path = os.path.join(base_dir, f"{filename}.wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="int16", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, genre)
mocked_samples.append(sample)
if filename in gtzan.filtered_test:
mocked_testing.append(sample)
if filename in gtzan.filtered_train:
mocked_training.append(sample)
if filename in gtzan.filtered_valid:
mocked_validation.append(sample)
seed += 1
return (mocked_samples, mocked_training, mocked_validation, mocked_testing)
class TestGTZAN(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
training = []
validation = []
testing = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
mocked_data = get_mock_dataset(cls.root_dir)
cls.samples = mocked_data[0]
cls.training = mocked_data[1]
cls.validation = mocked_data[2]
cls.testing = mocked_data[3]
def test_no_subset(self):
dataset = gtzan.GTZAN(self.root_dir)
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert label == self.samples[i][2]
n_ite += 1
assert n_ite == len(self.samples)
def _test_training(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.training[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.training[i][1]
assert label == self.training[i][2]
n_ite += 1
assert n_ite == len(self.training)
def _test_validation(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.validation[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.validation[i][1]
assert label == self.validation[i][2]
n_ite += 1
assert n_ite == len(self.validation)
def _test_testing(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.testing[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.testing[i][1]
assert label == self.testing[i][2]
n_ite += 1
assert n_ite == len(self.testing)
def test_training_str(self):
train_dataset = gtzan.GTZAN(self.root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_str(self):
val_dataset = gtzan.GTZAN(self.root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_str(self):
test_dataset = gtzan.GTZAN(self.root_dir, subset="testing")
self._test_testing(test_dataset)
def test_training_path(self):
root_dir = Path(self.root_dir)
train_dataset = gtzan.GTZAN(root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_path(self):
root_dir = Path(self.root_dir)
val_dataset = gtzan.GTZAN(root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_path(self):
root_dir = Path(self.root_dir)
test_dataset = gtzan.GTZAN(root_dir, subset="testing")
self._test_testing(test_dataset)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN'
]
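# e.g. building one of these detectors from a loaded config (sketch, using
# the mmdet 2.x builder API; `cfg` is a hypothetical Config object):
#
#   from mmdet.models import build_detector
#   detector = build_detector(cfg.model)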
|
from .atss import ATSS
from .autoassign import AutoAssign
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .centernet import CenterNet
from .cornernet import CornerNet
from .deformable_detr import DeformableDETR
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .panoptic_fpn import PanopticFPN
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .scnet import SCNet
from .single_stage import SingleStageDetector
from .sparse_rcnn import SparseRCNN
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .yolof import YOLOF
from .yolox import YOLOX
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',
'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',
'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',
'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',
'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet',
'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',
'TwoStagePanopticSegmentor', 'PanopticFPN'
]
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class LayerNorm(Module):
config_keys: list[str] = ["dimension"]
def __init__(self, dimension: int):
super().__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
class LayerNorm(nn.Module):
def __init__(self, dimension: int):
super().__init__()
self.dimension = dimension
self.norm = nn.LayerNorm(dimension)
def forward(self, features: dict[str, Tensor]):
features["sentence_embedding"] = self.norm(features["sentence_embedding"])
return features
def get_sentence_embedding_dimension(self):
return self.dimension
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dimension": self.dimension}, fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = LayerNorm(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
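# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# LayerNorm operates on a features dict and normalizes the "sentence_embedding"
# entry; the dimension and batch size below are made up for illustration
# (torch is already imported at the top of this file).
if __name__ == "__main__":
    layer = LayerNorm(dimension=8)
    features = {"sentence_embedding": torch.randn(2, 8)}
    out = layer(features)["sentence_embedding"]
    print(out.mean(dim=-1))  # ~0 per row after layer normalization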
|
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
import asana
class AsanaReader(BaseReader):
"""
Asana reader. Reads data from an Asana workspace.
Args:
asana_token (str): Asana token.
"""
def __init__(self, asana_token: str) -> None:
"""Initialize Asana reader."""
self.client = asana.Client.access_token(asana_token)
def load_data(
self, workspace_id: Optional[str] = None, project_id: Optional[str] = None
) -> List[Document]:
"""
Load data from the workspace.
Args:
workspace_id (Optional[str], optional): Workspace ID. Defaults to None.
project_id (Optional[str], optional): Project ID. Defaults to None.
Returns:
List[Document]: List of documents.
"""
if workspace_id is None and project_id is None:
raise ValueError("Either workspace_id or project_id must be provided")
if workspace_id is not None and project_id is not None:
raise ValueError(
"Only one of workspace_id or project_id should be provided"
)
results = []
if workspace_id is not None:
workspace_name = self.client.workspaces.find_by_id(workspace_id)["name"]
projects = self.client.projects.find_all({"workspace": workspace_id})
# Case: Only project_id is provided
else: # since we've handled the other cases, this means project_id is not None
projects = [self.client.projects.find_by_id(project_id)]
workspace_name = projects[0]["workspace"]["name"]
for project in projects:
tasks = self.client.tasks.find_all(
{
"project": project["gid"],
"opt_fields": "name,notes,completed,completed_at,completed_by,assignee,followers,custom_fields",
}
)
for task in tasks:
stories = self.client.tasks.stories(task["gid"], opt_fields="type,text")
comments = "\n".join(
[
story["text"]
for story in stories
if story.get("type") == "comment" and "text" in story
]
)
task_metadata = {
"task_id": task.get("gid", ""),
"name": task.get("name", ""),
"assignee": (task.get("assignee") or {}).get("name", ""),
"completed_on": task.get("completed_at", ""),
"completed_by": (task.get("completed_by") or {}).get("name", ""),
"project_name": project.get("name", ""),
"custom_fields": [
i["display_value"]
for i in task.get("custom_fields")
if task.get("custom_fields") is not None
],
"workspace_name": workspace_name,
"url": f"https://app.asana.com/0/{project['gid']}/{task['gid']}",
}
if task.get("followers") is not None:
task_metadata["followers"] = [
i.get("name") for i in task.get("followers") if "name" in i
]
else:
task_metadata["followers"] = []
results.append(
Document(
text=task.get("name", "")
+ " "
+ task.get("notes", "")
+ " "
+ comments,
extra_info=task_metadata,
)
)
return results
|
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
import asana
class AsanaReader(BaseReader):
"""Asana reader. Reads data from an Asana workspace.
Args:
asana_token (str): Asana token.
"""
def __init__(self, asana_token: str) -> None:
"""Initialize Asana reader."""
self.client = asana.Client.access_token(asana_token)
def load_data(
self, workspace_id: Optional[str] = None, project_id: Optional[str] = None
) -> List[Document]:
"""Load data from the workspace.
Args:
workspace_id (Optional[str], optional): Workspace ID. Defaults to None.
project_id (Optional[str], optional): Project ID. Defaults to None.
Returns:
List[Document]: List of documents.
"""
if workspace_id is None and project_id is None:
raise ValueError("Either workspace_id or project_id must be provided")
if workspace_id is not None and project_id is not None:
raise ValueError(
"Only one of workspace_id or project_id should be provided"
)
results = []
if workspace_id is not None:
workspace_name = self.client.workspaces.find_by_id(workspace_id)["name"]
projects = self.client.projects.find_all({"workspace": workspace_id})
# Case: Only project_id is provided
else: # since we've handled the other cases, this means project_id is not None
projects = [self.client.projects.find_by_id(project_id)]
workspace_name = projects[0]["workspace"]["name"]
for project in projects:
tasks = self.client.tasks.find_all(
{
"project": project["gid"],
"opt_fields": "name,notes,completed,completed_at,completed_by,assignee,followers,custom_fields",
}
)
for task in tasks:
stories = self.client.tasks.stories(task["gid"], opt_fields="type,text")
comments = "\n".join(
[
story["text"]
for story in stories
if story.get("type") == "comment" and "text" in story
]
)
task_metadata = {
"task_id": task.get("gid", ""),
"name": task.get("name", ""),
"assignee": (task.get("assignee") or {}).get("name", ""),
"completed_on": task.get("completed_at", ""),
"completed_by": (task.get("completed_by") or {}).get("name", ""),
"project_name": project.get("name", ""),
"custom_fields": [
i["display_value"]
for i in task.get("custom_fields")
if task.get("custom_fields") is not None
],
"workspace_name": workspace_name,
"url": f"https://app.asana.com/0/{project['gid']}/{task['gid']}",
}
if task.get("followers") is not None:
task_metadata["followers"] = [
i.get("name") for i in task.get("followers") if "name" in i
]
else:
task_metadata["followers"] = []
results.append(
Document(
text=task.get("name", "")
+ " "
+ task.get("notes", "")
+ " "
+ comments,
extra_info=task_metadata,
)
)
return results
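# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# The token and project gid below are placeholders; load_data requires exactly
# one of workspace_id or project_id, as validated above.
if __name__ == "__main__":
    reader = AsanaReader(asana_token="<ASANA_PERSONAL_ACCESS_TOKEN>")
    documents = reader.load_data(project_id="<PROJECT_GID>")
    print(f"Loaded {len(documents)} task documents")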
|
from typing import Any, Dict
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
_transformed_types = (is_simple_tensor, datapoints.Video)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints._VideoType, params: Dict[str, Any]) -> datapoints._VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
|
from typing import Any, Dict
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
_transformed_types = (is_simple_tensor, datapoints.Video)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints.VideoType, params: Dict[str, Any]) -> datapoints.VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
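# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# Uniformly keeps 4 of the 16 frames of a random clip; (T, C, H, W) follows
# torchvision's video layout.
import torch
if __name__ == "__main__":
    video = datapoints.Video(torch.rand(16, 3, 32, 32))
    transform = UniformTemporalSubsample(num_samples=4)
    print(transform(video).shape)  # torch.Size([4, 3, 32, 32])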
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize as deserialize
from keras.src.initializers import get as get
from keras.src.initializers import serialize as serialize
from keras.src.initializers.constant_initializers import STFT as STFT
from keras.src.initializers.constant_initializers import STFT as STFTInitializer
from keras.src.initializers.constant_initializers import STFT as stft
from keras.src.initializers.constant_initializers import Constant as Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity as Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones as Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros as Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer as Initializer
from keras.src.initializers.random_initializers import (
GlorotNormal as GlorotNormal,
)
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import (
GlorotUniform as GlorotUniform,
)
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal as HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform as HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import (
LecunNormal as LecunNormal,
)
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import (
LecunUniform as LecunUniform,
)
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import Orthogonal as Orthogonal
from keras.src.initializers.random_initializers import (
Orthogonal as OrthogonalInitializer,
)
from keras.src.initializers.random_initializers import Orthogonal as orthogonal
from keras.src.initializers.random_initializers import (
RandomNormal as RandomNormal,
)
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import (
RandomUniform as RandomUniform,
)
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import (
TruncatedNormal as TruncatedNormal,
)
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import (
VarianceScaling as VarianceScaling,
)
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import STFT
from keras.src.initializers.constant_initializers import STFT as STFTInitializer
from keras.src.initializers.constant_initializers import STFT as stft
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import Orthogonal
from keras.src.initializers.random_initializers import (
Orthogonal as OrthogonalInitializer,
)
from keras.src.initializers.random_initializers import Orthogonal as orthogonal
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
|
_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # this if/else statement needs to be refactored; it is too long.
            # The check should be delegated to the type level.
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
mesh_url=Mesh3DUrl,
point_cloud_url=PointCloud3DUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
    def _to_node_protobuf(self) -> 'NodeProto':
        """Convert Document into a NodeProto protobuf message. This function
        should be called when the Document is nested into another Document that
        needs to be converted into a protobuf.
        :return: the nested item protobuf message
        """
        from docarray.proto import NodeProto
        return NodeProto(nested=self.to_protobuf())
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
NdArray,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # this if/else statement needs to be refactored; it is too long.
            # The check should be delegated to the type level.
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
    def _to_node_protobuf(self) -> 'NodeProto':
        """Convert Document into a NodeProto protobuf message. This function
        should be called when the Document is nested into another Document that
        needs to be converted into a protobuf.
        :return: the nested item protobuf message
        """
        from docarray.proto import NodeProto
        return NodeProto(nested=self.to_protobuf())
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from .base import BaseEvaluator
class ComposedEvaluator:
"""Wrapper class to compose multiple :class:`BaseEvaluator` instances.
Args:
evaluators (Sequence[BaseEvaluator]): The evaluators to compose.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
"""
def __init__(self,
evaluators: Sequence[BaseEvaluator],
collect_device='cpu'):
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.evaluators = evaluators
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
for evaluator in self.evaluators:
evaluator.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[Tuple[Any, BaseDataElement]],
predictions: Sequence[BaseDataElement]):
"""Invoke process method of each wrapped evaluator.
Args:
data_batch (Sequence[Tuple[Any, BaseDataElement]]): A batch of data
from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
        for evaluator in self.evaluators:
            evaluator.process(data_batch, predictions)
def evaluate(self, size: int) -> dict:
"""Invoke evaluate method of each wrapped evaluator and collect the
metrics dict.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
                ``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics of all wrapped evaluators. The keys are
the names of the metrics, and the values are corresponding results.
"""
metrics = {}
for evaluator in self.evaluators:
_metrics = evaluator.evaluate(size)
# Check metric name conflicts
for name in _metrics.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluators with the same metric '
f'name {name}')
metrics.update(_metrics)
return metrics
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from .base import BaseEvaluator
class ComposedEvaluator:
"""Wrapper class to compose multiple :class:`BaseEvaluator` instances.
Args:
evaluators (Sequence[BaseEvaluator]): The evaluators to compose.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
"""
def __init__(self,
evaluators: Sequence[BaseEvaluator],
collect_device='cpu'):
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.evaluators = evaluators
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
for evaluator in self.evaluators:
evaluator.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[Tuple[Any, BaseDataSample]],
predictions: Sequence[BaseDataSample]):
"""Invoke process method of each wrapped evaluator.
Args:
data_batch (Sequence[Tuple[Any, BaseDataSample]]): A batch of data
from the dataloader.
predictions (Sequence[BaseDataSample]): A batch of outputs from
the model.
"""
        for evaluator in self.evaluators:
            evaluator.process(data_batch, predictions)
def evaluate(self, size: int) -> dict:
"""Invoke evaluate method of each wrapped evaluator and collect the
metrics dict.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
                ``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics of all wrapped evaluators. The keys are
the names of the metrics, and the values are corresponding results.
"""
metrics = {}
for evaluator in self.evaluators:
_metrics = evaluator.evaluate(size)
# Check metric name conflicts
for name in _metrics.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluators with the same metric '
f'name {name}')
metrics.update(_metrics)
return metrics
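# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# DummyEvaluator is a stand-in defined only for this sketch; it mimics the
# BaseEvaluator interface (dataset_meta / process / evaluate) relied on above.
class DummyEvaluator:
    dataset_meta = None
    def __init__(self, metric_name, value):
        self.metric_name = metric_name
        self.value = value
    def process(self, data_batch, predictions):
        pass  # a real evaluator accumulates per-batch results here
    def evaluate(self, size):
        return {self.metric_name: self.value}
if __name__ == '__main__':
    composed = ComposedEvaluator(
        evaluators=[DummyEvaluator('mAP', 0.5), DummyEvaluator('AR', 0.6)])
    # metrics from all wrapped evaluators are merged into one dict; duplicate
    # metric names would raise a ValueError, as implemented in `evaluate` above
    print(composed.evaluate(size=100))  # -> {'mAP': 0.5, 'AR': 0.6}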
|
from langchain.chains.router.base import MultiRouteChain, RouterChain
from langchain.chains.router.llm_router import LLMRouterChain
from langchain.chains.router.multi_prompt import MultiPromptChain
from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain
__all__ = [
"LLMRouterChain",
"MultiPromptChain",
"MultiRetrievalQAChain",
"MultiRouteChain",
"RouterChain",
]
|
from langchain.chains.router.base import MultiRouteChain, RouterChain
from langchain.chains.router.llm_router import LLMRouterChain
from langchain.chains.router.multi_prompt import MultiPromptChain
from langchain.chains.router.multi_retrieval_qa import MultiRetrievalQAChain
__all__ = [
"RouterChain",
"MultiRouteChain",
"MultiPromptChain",
"MultiRetrievalQAChain",
"LLMRouterChain",
]
|
import os
import time
import pytest
from jina import Flow, Executor
class SlowExecutor(Executor):
def close(self) -> None:
with open(os.path.join(self.metas.workspace, 'test'), 'w', encoding='utf-8') as f:
time.sleep(10)
f.write('x')
@pytest.mark.slow
def test_slow_executor_close(tmpdir):
with Flow().add(
uses={'jtype': 'SlowExecutor', 'with': {}, 'metas': {'workspace': str(tmpdir)}}
) as f:
pass
assert os.path.exists(os.path.join(tmpdir, 'test'))
|
import os
import time
import pytest
from jina import Flow, Executor
class SlowExecutor(Executor):
def close(self) -> None:
with open(os.path.join(self.metas.workspace, 'test'), 'w') as f:
time.sleep(10)
f.write('x')
@pytest.mark.slow
def test_slow_executor_close(tmpdir):
with Flow().add(
uses={'jtype': 'SlowExecutor', 'with': {}, 'metas': {'workspace': str(tmpdir)}}
) as f:
pass
assert os.path.exists(os.path.join(tmpdir, 'test'))
|
"""Standard LangChain interface tests"""
import os
from typing import Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
"stream_usage": True,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
|
"""Standard LangChain interface tests"""
import os
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_openai import AzureChatOpenAI
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
class TestAzureOpenAIStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"],
"model": "gpt-4o-mini",
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
class TestAzureOpenAIStandardLegacy(ChatModelIntegrationTests):
"""Test a legacy model."""
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": os.environ["AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME"],
"openai_api_version": OPENAI_API_VERSION,
"azure_endpoint": OPENAI_API_BASE,
}
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
@pytest.mark.xfail(reason="Not yet supported.")
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
|
import math
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Args:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self._build_at_init()
def call(self, inputs, training=False):
if training and self.rate > 0:
stddev = math.sqrt(self.rate / (1.0 - self.rate))
return inputs * backend.random.normal(
shape=ops.shape(inputs),
mean=1.0,
stddev=stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
}
return {**base_config, **config}
|
import math
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Args:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
stddev = math.sqrt(self.rate / (1.0 - self.rate))
return inputs * backend.random.normal(
shape=ops.shape(inputs),
mean=1.0,
stddev=stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
}
return {**base_config, **config}
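# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# For rate=0.25 the multiplicative noise has stddev sqrt(0.25 / 0.75) ~= 0.577
# and mean 1.0, so activations keep their expected value during training;
# with training=False the layer is the identity. Shapes are illustrative.
import numpy as np
if __name__ == "__main__":
    layer = GaussianDropout(rate=0.25, seed=0)
    x = np.ones((2, 4), dtype="float32")
    print(layer(x, training=True))   # 1-centered noisy values
    print(layer(x, training=False))  # identical to x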
|
import os
from typing import Dict, Tuple
import numpy as np
from jina import Document, DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
class CrudIndexer(Executor):
"""Simple indexer class"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.logger = JinaLogger('CrudIndexer')
self._docs = DocumentArray()
self._dump_location = os.path.join(self.metas.workspace, 'docs.json')
if os.path.exists(self._dump_location):
self._docs = DocumentArray.load_json(self._dump_location)
self.logger.debug(f'Loaded {len(self._docs)} from {self._dump_location}')
else:
self.logger.warning(f'No data found at {self._dump_location}')
@requests(on='/index')
def index(self, docs: DocumentArray, **kwargs):
self._docs.extend(docs)
@requests(on='/update')
def update(self, docs: DocumentArray, **kwargs):
self.delete(docs)
self.index(docs)
def close(self) -> None:
self.logger.debug(f'Dumping {len(self._docs)} to {self._dump_location}')
self._docs.save_json(self._dump_location)
@requests(on='/delete')
def delete(self, docs: DocumentArray, **kwargs):
# TODO we can do del _docs[d.id] once
# tests.unit.types.arrays.test_documentarray.test_delete_by_id is fixed
ids_to_delete = [d.id for d in docs]
idx_to_delete = []
for i, doc in enumerate(self._docs):
if doc.id in ids_to_delete:
idx_to_delete.append(i)
for i in sorted(idx_to_delete, reverse=True):
del self._docs[i]
@requests(on='/search')
def search(self, docs: DocumentArray, parameters: Dict, **kwargs):
top_k = int(parameters.get('top_k', 1))
a = np.stack(docs[:, 'embedding'])
b = np.stack(self._docs[:, 'embedding'])
q_emb = _ext_A(_norm(a))
d_emb = _ext_B(_norm(b))
dists = _cosine(q_emb, d_emb)
idx, dist = self._get_sorted_top_k(dists, top_k)
for _q, _ids, _dists in zip(docs, idx, dist):
for _id, _dist in zip(_ids, _dists):
d = Document(self._docs[int(_id)], copy=True)
d.scores['cosine'].value = 1 - _dist
_q.matches.append(d)
@staticmethod
def _get_sorted_top_k(
        dist: 'np.ndarray', top_k: int
) -> Tuple['np.ndarray', 'np.ndarray']:
if top_k >= dist.shape[1]:
idx = dist.argsort(axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx, axis=1)
else:
idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx_ps, axis=1)
idx_fs = dist.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
dist = np.take_along_axis(dist, idx_fs, axis=1)
return idx, dist
def _get_ones(x, y):
return np.ones((x, y))
def _ext_A(A):
nA, dim = A.shape
A_ext = _get_ones(nA, dim * 3)
A_ext[:, dim : 2 * dim] = A
A_ext[:, 2 * dim :] = A**2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = _get_ones(dim * 3, nB)
B_ext[:dim] = (B**2).T
B_ext[dim : 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
|
import os
from typing import Dict, Tuple
import numpy as np
from jina import Document, DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
class CrudIndexer(Executor):
"""Simple indexer class"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.logger = JinaLogger('CrudIndexer')
self._docs = DocumentArray()
self._dump_location = os.path.join(self.metas.workspace, 'docs.json')
if os.path.exists(self._dump_location):
self._docs = DocumentArray.load_json(self._dump_location)
self.logger.debug(f'Loaded {len(self._docs)} from {self._dump_location}')
else:
self.logger.warning(f'No data found at {self._dump_location}')
@requests(on='/index')
def index(self, docs: 'DocumentArray', **kwargs):
self._docs.extend(docs)
@requests(on='/update')
def update(self, docs: 'DocumentArray', **kwargs):
self.delete(docs)
self.index(docs)
def close(self) -> None:
self.logger.debug(f'Dumping {len(self._docs)} to {self._dump_location}')
self._docs.save_json(self._dump_location)
@requests(on='/delete')
def delete(self, docs: 'DocumentArray', **kwargs):
# TODO we can do del _docs[d.id] once
# tests.unit.types.arrays.test_documentarray.test_delete_by_id is fixed
ids_to_delete = [d.id for d in docs]
idx_to_delete = []
for i, doc in enumerate(self._docs):
if doc.id in ids_to_delete:
idx_to_delete.append(i)
for i in sorted(idx_to_delete, reverse=True):
del self._docs[i]
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
top_k = int(parameters.get('top_k', 1))
a = np.stack(docs[:, 'embedding'])
b = np.stack(self._docs[:, 'embedding'])
q_emb = _ext_A(_norm(a))
d_emb = _ext_B(_norm(b))
dists = _cosine(q_emb, d_emb)
idx, dist = self._get_sorted_top_k(dists, top_k)
for _q, _ids, _dists in zip(docs, idx, dist):
for _id, _dist in zip(_ids, _dists):
d = Document(self._docs[int(_id)], copy=True)
d.scores['cosine'].value = 1 - _dist
_q.matches.append(d)
@staticmethod
def _get_sorted_top_k(
        dist: 'np.ndarray', top_k: int
) -> Tuple['np.ndarray', 'np.ndarray']:
if top_k >= dist.shape[1]:
idx = dist.argsort(axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx, axis=1)
else:
idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx_ps, axis=1)
idx_fs = dist.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
dist = np.take_along_axis(dist, idx_fs, axis=1)
return idx, dist
def _get_ones(x, y):
return np.ones((x, y))
def _ext_A(A):
nA, dim = A.shape
A_ext = _get_ones(nA, dim * 3)
A_ext[:, dim : 2 * dim] = A
A_ext[:, 2 * dim :] = A**2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = _get_ones(dim * 3, nB)
B_ext[:dim] = (B**2).T
B_ext[dim : 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
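# --- Verification sketch (editor's addition, not part of the original file) ---
# The extended matrices turn one matrix product into pairwise squared
# euclidean distances: A_ext.dot(B_ext)[i, j] = |a_i|^2 - 2 a_i.b_j + |b_j|^2
# = |a_i - b_j|^2. On unit-normalized rows this equals 2 - 2*cos, so halving
# it in `_cosine` yields the cosine distance 1 - cos, which is why `search`
# reports `1 - _dist` as the similarity score.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    A, B = rng.normal(size=(3, 8)), rng.normal(size=(5, 8))
    dists = _cosine(_ext_A(_norm(A)), _ext_B(_norm(B)))
    expected = (1 - _norm(A) @ _norm(B).T).clip(min=0)
    assert np.allclose(dists, expected, atol=1e-8)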
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeSt
from mmdet.models.backbones.resnest import Bottleneck as BottleneckS
def test_resnest_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')
# Test ResNeSt Bottleneck structure
block = BottleneckS(
2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')
assert block.avd_layer.stride == 2
assert block.conv2.channels == 4
# Test ResNeSt Bottleneck forward
block = BottleneckS(16, 4, radix=2, reduction_factor=4)
x = torch.randn(2, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([2, 16, 56, 56])
def test_resnest_backbone():
with pytest.raises(KeyError):
# ResNeSt depth should be in [50, 101, 152, 200]
ResNeSt(depth=18)
# Test ResNeSt with radix 2, reduction_factor 4
model = ResNeSt(
depth=50,
base_channels=4,
radix=2,
reduction_factor=4,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(2, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([2, 16, 8, 8])
assert feat[1].shape == torch.Size([2, 32, 4, 4])
assert feat[2].shape == torch.Size([2, 64, 2, 2])
assert feat[3].shape == torch.Size([2, 128, 1, 1])
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeSt
from mmdet.models.backbones.resnest import Bottleneck as BottleneckS
def test_resnest_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')
# Test ResNeSt Bottleneck structure
block = BottleneckS(
64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch')
assert block.avd_layer.stride == 2
assert block.conv2.channels == 256
# Test ResNeSt Bottleneck forward
block = BottleneckS(64, 16, radix=2, reduction_factor=4)
x = torch.randn(2, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([2, 64, 56, 56])
def test_resnest_backbone():
with pytest.raises(KeyError):
# ResNeSt depth should be in [50, 101, 152, 200]
ResNeSt(depth=18)
# Test ResNeSt with radix 2, reduction_factor 4
model = ResNeSt(
depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(2, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([2, 256, 56, 56])
assert feat[1].shape == torch.Size([2, 512, 28, 28])
assert feat[2].shape == torch.Size([2, 1024, 14, 14])
assert feat[3].shape == torch.Size([2, 2048, 7, 7])
|
import os
from pathlib import Path
from torchaudio.datasets import cmuarctic
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
def get_mock_dataset(root_dir):
"""
    root_dir: directory of the mocked dataset
"""
mocked_data = []
sample_rate = 16000
transcript = "This is a test transcript."
base_dir = os.path.join(root_dir, "ARCTIC", "cmu_us_aew_arctic")
txt_dir = os.path.join(base_dir, "etc")
os.makedirs(txt_dir, exist_ok=True)
txt_file = os.path.join(txt_dir, "txt.done.data")
audio_dir = os.path.join(base_dir, "wav")
os.makedirs(audio_dir, exist_ok=True)
seed = 42
with open(txt_file, "w") as txt:
for c in ["a", "b"]:
for i in range(5):
utterance_id = f"arctic_{c}{i:04d}"
path = os.path.join(audio_dir, f"{utterance_id}.wav")
data = get_whitenoise(
sample_rate=sample_rate,
duration=3,
n_channels=1,
dtype="int16",
seed=seed,
)
save_wav(path, data, sample_rate)
sample = (
normalize_wav(data),
sample_rate,
transcript,
utterance_id.split("_")[1],
)
mocked_data.append(sample)
txt.write(f'( {utterance_id} "{transcript}" )\n')
seed += 1
return mocked_data
class TestCMUARCTIC(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_cmuarctic(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, utterance_id) in enumerate(dataset):
expected_sample = self.samples[i]
assert sample_rate == expected_sample[1]
assert transcript == expected_sample[2]
assert utterance_id == expected_sample[3]
self.assertEqual(expected_sample[0], waveform, atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.samples)
def test_cmuarctic_str(self):
dataset = cmuarctic.CMUARCTIC(self.root_dir)
self._test_cmuarctic(dataset)
def test_cmuarctic_path(self):
dataset = cmuarctic.CMUARCTIC(Path(self.root_dir))
self._test_cmuarctic(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import cmuarctic
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_dataset(root_dir):
"""
    root_dir: directory of the mocked dataset
"""
mocked_data = []
sample_rate = 16000
transcript = "This is a test transcript."
base_dir = os.path.join(root_dir, "ARCTIC", "cmu_us_aew_arctic")
txt_dir = os.path.join(base_dir, "etc")
os.makedirs(txt_dir, exist_ok=True)
txt_file = os.path.join(txt_dir, "txt.done.data")
audio_dir = os.path.join(base_dir, "wav")
os.makedirs(audio_dir, exist_ok=True)
seed = 42
with open(txt_file, "w") as txt:
for c in ["a", "b"]:
for i in range(5):
utterance_id = f"arctic_{c}{i:04d}"
path = os.path.join(audio_dir, f"{utterance_id}.wav")
data = get_whitenoise(
sample_rate=sample_rate,
duration=3,
n_channels=1,
dtype="int16",
seed=seed,
)
save_wav(path, data, sample_rate)
sample = (
normalize_wav(data),
sample_rate,
transcript,
utterance_id.split("_")[1],
)
mocked_data.append(sample)
txt.write(f'( {utterance_id} "{transcript}" )\n')
seed += 1
return mocked_data
class TestCMUARCTIC(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_cmuarctic(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, utterance_id) in enumerate(dataset):
expected_sample = self.samples[i]
assert sample_rate == expected_sample[1]
assert transcript == expected_sample[2]
assert utterance_id == expected_sample[3]
self.assertEqual(expected_sample[0], waveform, atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.samples)
def test_cmuarctic_str(self):
dataset = cmuarctic.CMUARCTIC(self.root_dir)
self._test_cmuarctic(dataset)
def test_cmuarctic_path(self):
dataset = cmuarctic.CMUARCTIC(Path(self.root_dir))
self._test_cmuarctic(dataset)
|
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
f"`factor` should be between 0 and 1. Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
# Base case: Unbatched data
batch_size = 1
if len(images.shape) == 4:
# This is a batch of images (4D input)
batch_size = self.backend.core.shape(images)[0]
random_values = self.backend.random.uniform(
shape=(batch_size,),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformation, training=True):
if training:
should_apply = (
transformation
if transformation is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(
should_apply, grayscale_images, images
)
return images
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return tree.map_structure(
lambda x: backend.KerasTensor(
x.shape, dtype=x.dtype, sparse=x.sparse
),
inputs,
)
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
f"`factor` should be between 0 and 1. Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.seed = seed
self.generator = self.backend.random.SeedGenerator(seed)
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
# Base case: Unbatched data
batch_size = 1
if len(images.shape) == 4:
# This is a batch of images (4D input)
batch_size = self.backend.core.shape(images)[0]
random_values = self.backend.random.uniform(
shape=(batch_size,),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformation, training=True):
if training:
should_apply = (
transformation
if transformation is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(
should_apply, grayscale_images, images
)
return images
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
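# --- Hypothetical usage sketch (editor's addition, not part of the original file) ---
# With factor=1.0 every image is converted, so the three output channels of
# each pixel should be identical. Input shape and dtype are made up.
import numpy as np
if __name__ == "__main__":
    layer = RandomGrayscale(factor=1.0, seed=0)
    images = np.random.uniform(size=(2, 8, 8, 3)).astype("float32")
    out = np.asarray(layer(images, training=True))
    print(np.allclose(out[..., 0], out[..., 1]),
          np.allclose(out[..., 1], out[..., 2]))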
|
_base_ = '../cascade_rcnn/cascade-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
from qdrant_client.http import models
from qdrant_client.http.models.models import Distance
if TYPE_CHECKING: # pragma: no cover
import numpy as np
import tensorflow
import torch
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self,
q: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None
if not search_params
else models.SearchParams(**search_params),
limit=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query in a format supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:param search_params: additional parameters of the search
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [
self._find_similar_vectors(
query, limit=limit, filter=filter, search_params=search_params
)
]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(
q, limit=limit, filter=filter, search_params=search_params
)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=models.Filter(**filter),
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docs.docarray.org/advanced/document-store/qdrant/#qdrant
        :return: a `DocumentArray` containing the `Document` objects that match the filter.
"""
return self._find_with_filter(filter, limit=limit)
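# A minimal sketch of a concrete host for the mixin above, assuming a local
# Qdrant instance and an existing collection. All values are illustrative,
# and the real class also provides `_map_embedding` for query conversion.
from qdrant_client import QdrantClient


class _DemoFindMixin(FindMixin):
    @property
    def client(self) -> 'QdrantClient':
        # A fresh client per access keeps the sketch short; real code caches it.
        return QdrantClient(host='localhost', port=6333)

    @property
    def collection_name(self) -> str:
        return 'my_collection'

    @property
    def serialize_config(self) -> dict:
        return {}

    @property
    def distance(self) -> 'Distance':
        return Distance.COSINE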
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
from qdrant_client.http import models
from qdrant_client.http.models.models import Distance
if TYPE_CHECKING: # pragma: no cover
import numpy as np
import tensorflow
import torch
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self,
q: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None
if not search_params
else models.SearchParams(**search_params),
limit=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query in a format supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:param search_params: additional parameters of the search
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [
self._find_similar_vectors(
query, limit=limit, filter=filter, search_params=search_params
)
]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(
q, limit=limit, filter=filter, search_params=search_params
)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=models.Filter(**filter),
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docarray.jina.ai/advanced/document-store/qdrant/#qdrant
        :return: a `DocumentArray` containing the `Document` objects that match the filter.
"""
return self._find_with_filter(filter, limit=limit)
|
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
"""
        # TODO: Update; as mentioned, this training setup has not been applied to sparse models
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../../examples/sentence_transformer/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SparseEncoder
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../../examples/sentence_transformer/training/distillation/README.html>`_
- `Training > Multilingual Models <../../../examples/sentence_transformer/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`SparseMarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
}
)
def compute_labels(batch):
return {"label": teacher_model.encode(batch["english"], convert_to_sparse_tensor=False)}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.SparseMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model)
|
from __future__ import annotations
from sentence_transformers.losses.MSELoss import MSELoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMSELoss(MSELoss):
def __init__(self, model: SparseEncoder) -> None:
return super().__init__(model)
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.9.0"
@keras_export("keras.version")
def version():
return __version__
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.8.0"
@keras_export("keras.version")
def version():
return __version__
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .compose import Compose
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'PackDetInputs', 'Compose', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromWebcam', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (ImageToTensor, PackDetInputs, ToDataContainer,
ToTensor, Transpose)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,
MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
__all__ = [
'PackDetInputs', 'Compose', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'LoadImageFromWebcam', 'LoadAnnotations',
'LoadPanopticAnnotations', 'LoadMultiChannelImageFromFiles',
'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'RandomCrop',
'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform',
'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform',
'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine',
'YOLOXHSVRandomAug', 'CopyPaste', 'FilterAnnotations', 'Pad'
]
|
from typing import List, Optional
from docarray.base_doc.doc import BaseDoc
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
|
from typing import List, Optional
from docarray.base_doc.doc import BaseDoc
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
|
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
nuget_dir = Path(__file__).absolute().parent / "nuget"
linux_folder_path = nuget_dir / "runtimes" / "linux-x64" / "native"
linux_folder_path.mkdir(parents=True, exist_ok=True)
osx_folder_path = nuget_dir / "runtimes" / "osx-x64" / "native"
osx_folder_path.mkdir(parents=True, exist_ok=True)
windows_folder_path = nuget_dir / "runtimes" / "win-x64" / "native"
windows_folder_path.mkdir(parents=True, exist_ok=True)
build_folder_path = nuget_dir / "build"
build_folder_path.mkdir(parents=True, exist_ok=True)
copyfile(source / "lib_lightgbm.so", linux_folder_path / "lib_lightgbm.so")
copyfile(source / "lib_lightgbm.dylib", osx_folder_path / "lib_lightgbm.dylib")
copyfile(source / "lib_lightgbm.dll", windows_folder_path / "lib_lightgbm.dll")
copyfile(source / "lightgbm.exe", windows_folder_path / "lightgbm.exe")
version = (nuget_dir.parents[1] / "VERSION.txt").read_text(encoding="utf-8").strip().replace("rc", "-rc")
nuget_str = rf"""<?xml version="1.0"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>LightGBM</id>
<version>{version}</version>
<authors>Guolin Ke</authors>
<owners>Guolin Ke</owners>
<license type="expression">MIT</license>
<projectUrl>https://github.com/microsoft/LightGBM</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A fast, distributed, high performance gradient boosting framework</description>
<copyright>Copyright {datetime.datetime.now().year} @ Microsoft</copyright>
<tags>machine-learning data-mining distributed native boosting gbdt</tags>
<dependencies> </dependencies>
</metadata>
<files>
<file src="build\**" target="build"/>
<file src="runtimes\**" target="runtimes"/>
</files>
</package>
"""
prop_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Condition="Exists('packages.config') OR
Exists('$(MSBuildProjectName).packages.config') OR
Exists('packages.$(MSBuildProjectName).config')">
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.dll"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.exe"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
</ItemGroup>
</Project>
"""
target_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
</PropertyGroup>
<Target Name="_LightGBMCheckForUnsupportedPlatformTarget"
Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == 'true'"
AfterTargets="_CheckForInvalidConfigurationAndPlatform">
<Error Condition="'$(PlatformTarget)' != 'x64' AND
('$(OutputType)' == 'Exe' OR '$(OutputType)'=='WinExe') AND
!('$(TargetFrameworkIdentifier)' == '.NETCoreApp' AND '$(PlatformTarget)' == '')"
Text="LightGBM currently supports 'x64' processor architectures. Please ensure your application is targeting 'x64'." />
</Target>
</Project>
"""
(nuget_dir / "LightGBM.nuspec").write_text(nuget_str, encoding="utf-8")
(nuget_dir / "build" / "LightGBM.props").write_text(prop_str, encoding="utf-8")
(nuget_dir / "build" / "LightGBM.targets").write_text(target_str, encoding="utf-8")
|
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / "linux-x64" / "native"
linux_folder_path.mkdir(parents=True, exist_ok=True)
osx_folder_path = current_dir / "runtimes" / "osx-x64" / "native"
osx_folder_path.mkdir(parents=True, exist_ok=True)
windows_folder_path = current_dir / "runtimes" / "win-x64" / "native"
windows_folder_path.mkdir(parents=True, exist_ok=True)
build_folder_path = current_dir / "build"
build_folder_path.mkdir(parents=True, exist_ok=True)
copyfile(source / "lib_lightgbm.so", linux_folder_path / "lib_lightgbm.so")
copyfile(source / "lib_lightgbm.dylib", osx_folder_path / "lib_lightgbm.dylib")
copyfile(source / "lib_lightgbm.dll", windows_folder_path / "lib_lightgbm.dll")
copyfile(source / "lightgbm.exe", windows_folder_path / "lightgbm.exe")
version = (current_dir.parent / "VERSION.txt").read_text(encoding="utf-8").strip().replace("rc", "-rc")
nuget_str = rf"""<?xml version="1.0"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>LightGBM</id>
<version>{version}</version>
<authors>Guolin Ke</authors>
<owners>Guolin Ke</owners>
<license type="expression">MIT</license>
<projectUrl>https://github.com/microsoft/LightGBM</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A fast, distributed, high performance gradient boosting framework</description>
<copyright>Copyright {datetime.datetime.now().year} @ Microsoft</copyright>
<tags>machine-learning data-mining distributed native boosting gbdt</tags>
<dependencies> </dependencies>
</metadata>
<files>
<file src="build\**" target="build"/>
<file src="runtimes\**" target="runtimes"/>
</files>
</package>
"""
prop_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Condition="Exists('packages.config') OR
Exists('$(MSBuildProjectName).packages.config') OR
Exists('packages.$(MSBuildProjectName).config')">
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.dll"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.exe"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
</ItemGroup>
</Project>
"""
target_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
</PropertyGroup>
<Target Name="_LightGBMCheckForUnsupportedPlatformTarget"
Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == 'true'"
AfterTargets="_CheckForInvalidConfigurationAndPlatform">
<Error Condition="'$(PlatformTarget)' != 'x64' AND
('$(OutputType)' == 'Exe' OR '$(OutputType)'=='WinExe') AND
!('$(TargetFrameworkIdentifier)' == '.NETCoreApp' AND '$(PlatformTarget)' == '')"
Text="LightGBM currently supports 'x64' processor architectures. Please ensure your application is targeting 'x64'." />
</Target>
</Project>
"""
(current_dir / "LightGBM.nuspec").write_text(nuget_str, encoding="utf-8")
(current_dir / "build" / "LightGBM.props").write_text(prop_str, encoding="utf-8")
(current_dir / "build" / "LightGBM.targets").write_text(target_str, encoding="utf-8")
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from ..layers import (ConditionalDetrTransformerDecoder,
DetrTransformerEncoder, SinePositionalEncoding)
from .detr import DETR
@MODELS.register_module()
class ConditionalDETR(DETR):
r"""Implementation of `Conditional DETR for Fast Training Convergence.
<https://arxiv.org/abs/2108.06152>`_.
Code is modified from the `official github repo
<https://github.com/Atten4Vis/ConditionalDETR>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding_cfg)
self.encoder = DetrTransformerEncoder(**self.encoder)
self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
        # NOTE: The embed_dims is typically passed from the inside out.
        # For example, in DETR the embed_dims is passed as
        # self_attn -> the first encoder layer -> encoder -> detector.
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
            f'embed_dims should be exactly 2 times num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
- hidden_states (Tensor): Has shape
(num_decoder_layers, bs, num_queries, dim)
- references (Tensor): Has shape
(bs, num_queries, 2)
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
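# A minimal shape sketch for the learned object queries created in
# `_init_layers` above; num_queries=300 and embed_dims=256 are assumed,
# typical DETR-family values rather than ones fixed by this file.
def _demo_query_embedding_shape():
    queries = nn.Embedding(300, 256)
    assert queries.weight.shape == (300, 256)  # one query vector per object slot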
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict
import torch.nn as nn
from torch import Tensor
from mmdet.registry import MODELS
from ..layers import (ConditionalDetrTransformerDecoder,
DetrTransformerEncoder, SinePositionalEncoding)
from .detr import DETR
@MODELS.register_module()
class ConditionalDETR(DETR):
r"""Implementation of `Conditional DETR for Fast Training Convergence.
<https://arxiv.org/abs/2108.06152>`_.
Code is modified from the `official github repo
<https://github.com/Atten4Vis/ConditionalDETR>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers except for backbone, neck and bbox_head."""
self.positional_encoding = SinePositionalEncoding(
**self.positional_encoding_cfg)
self.encoder = DetrTransformerEncoder(**self.encoder)
self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)
self.embed_dims = self.encoder.embed_dims
        # NOTE: The embed_dims is typically passed from the inside out.
        # For example, in DETR the embed_dims is passed as
        # self_attn -> the first encoder layer -> encoder -> detector.
self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)
num_feats = self.positional_encoding.num_feats
assert num_feats * 2 == self.embed_dims, \
            f'embed_dims should be exactly 2 times num_feats. ' \
f'Found {self.embed_dims} and {num_feats}.'
def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor,
memory_mask: Tensor, memory_pos: Tensor) -> Dict:
"""Forward with Transformer decoder.
Args:
query (Tensor): The queries of decoder inputs, has shape
(bs, num_queries, dim).
query_pos (Tensor): The positional queries of decoder inputs,
has shape (bs, num_queries, dim).
memory (Tensor): The output embeddings of the Transformer encoder,
has shape (bs, num_feat_points, dim).
memory_mask (Tensor): ByteTensor, the padding mask of the memory,
has shape (bs, num_feat_points).
memory_pos (Tensor): The positional embeddings of memory, has
shape (bs, num_feat_points, dim).
Returns:
dict: The dictionary of decoder outputs, which includes the
`hidden_states` and `references` of the decoder output.
- hidden_states (Tensor): Has shape
(num_decoder_layers, bs, num_queries, dim)
- references (Tensor): Has shape
(bs, num_queries, 2)
"""
hidden_states, references = self.decoder(
query=query,
key=memory,
value=memory,
query_pos=query_pos,
key_pos=memory_pos,
key_padding_mask=memory_mask)
head_inputs_dict = dict(
hidden_states=hidden_states, references=references)
return head_inputs_dict
|
import warnings
from typing import Optional, TypeVar
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='VideoUrl')
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
    Can be a remote (web) URL or a local file path.
"""
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a `NamedTuple` of
[`VideoNdArray`][docarray.typing.VideoNdArray],
[`AudioNdArray`][docarray.typing.AudioNdArray]
and [`NdArray`][docarray.typing.NdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: [`AudioNdArray`][docarray.typing.AudioNdArray] representing the audio content,
[`VideoNdArray`][docarray.typing.VideoNdArray] representing the images of the video,
[`NdArray`][docarray.typing.NdArray] of the key frame indices.
"""
buffer = self.load_bytes(**kwargs)
return buffer.load()
def load_bytes(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to [`VideoBytes`][docarray.typing.VideoBytes]. This will either load or download
        the file and save it into a [`VideoBytes`][docarray.typing.VideoBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`VideoBytes`][docarray.typing.VideoBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = True if self.startswith('http') else False
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
import warnings
from typing import Optional, TypeVar
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='VideoUrl')
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
    Can be a remote (web) URL or a local file path.
"""
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a `NamedTuple` of
[`VideoNdArray`][docarray.typing.VideoNdArray],
[`AudioNdArray`][docarray.typing.AudioNdArray]
and [`NdArray`][docarray.typing.NdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: [`AudioNdArray`][docarray.typing.AudioNdArray] representing the audio content,
[`VideoNdArray`][docarray.typing.VideoNdArray] representing the images of the video,
[`NdArray`][docarray.typing.NdArray] of the key frame indices.
"""
buffer = self.load_bytes(**kwargs)
return buffer.load()
def load_bytes(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to [`VideoBytes`][docarray.typing.VideoBytes]. This will either load or download
        the file and save it into a [`VideoBytes`][docarray.typing.VideoBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`VideoBytes`][docarray.typing.VideoBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = True if self.startswith('http') else False
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.')
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import MAP_TO_NONE
from keras.src.tree.tree_api import assert_same_paths
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import flatten_with_path
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import traverse
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import assert_same_paths
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import flatten_with_path
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import traverse
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize
from keras.src.quantizers import get
from keras.src.quantizers import serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import fake_quant_with_min_max_vars
from keras.src.quantizers.quantizers import quantize_and_dequantize
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.quantizers import deserialize
from keras.src.quantizers import get
from keras.src.quantizers import serialize
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import (
fake_quant_with_min_max_vars as fake_quant_with_min_max_vars_per_channel,
)
from keras.src.quantizers.quantizers import quantize_and_dequantize
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from typing_extensions import override
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
@override
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
elif serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
@override
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
@override
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
@override
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
@override
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
@override
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
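if __name__ == "__main__":
    # A minimal usage sketch: emit a few lifecycle events into "run.log"
    # (the filename and payloads are illustrative; in practice the handler
    # is passed to a chain via its `callbacks` configuration).
    handler = FileCallbackHandler("run.log", mode="w")
    handler.on_chain_start({"name": "demo"}, {"input": "hi"}, name="demo")
    handler.on_text("hello from the handler", end="\n")
    handler.on_chain_end({"output": "done"})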
|
"""Callback Handler that writes to a file."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, TextIO, cast
from typing_extensions import override
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
class FileCallbackHandler(BaseCallbackHandler):
"""Callback Handler that writes to a file.
Parameters:
filename: The file to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text.
"""
def __init__(
self, filename: str, mode: str = "a", color: Optional[str] = None
) -> None:
"""Initialize callback handler.
Args:
filename: The filename to write to.
mode: The mode to open the file in. Defaults to "a".
color: The color to use for the text. Defaults to None.
"""
self.file = cast("TextIO", Path(filename).open(mode, encoding="utf-8")) # noqa: SIM115
self.color = color
def __del__(self) -> None:
"""Destructor to cleanup when done."""
self.file.close()
@override
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Print out that we are entering a chain.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
if "name" in kwargs:
name = kwargs["name"]
else:
if serialized:
name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1])
else:
name = "<unknown>"
print_text(
f"\n\n\033[1m> Entering new {name} chain...\033[0m",
end="\n",
file=self.file,
)
@override
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Print out that we finished a chain.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file)
@override
def on_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(action.log, color=color or self.color, file=self.file)
@override
def on_tool_end(
self,
output: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""If not the final action, print out observation.
Args:
output (str): The output to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
observation_prefix (Optional[str], optional): The observation prefix.
Defaults to None.
llm_prefix (Optional[str], optional): The LLM prefix.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
if observation_prefix is not None:
print_text(f"\n{observation_prefix}", file=self.file)
print_text(output, color=color or self.color, file=self.file)
if llm_prefix is not None:
print_text(f"\n{llm_prefix}", file=self.file)
@override
def on_text(
self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any
) -> None:
"""Run when the agent ends.
Args:
text (str): The text to print.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
end (str, optional): The end character. Defaults to "".
**kwargs (Any): Additional keyword arguments.
"""
print_text(text, color=color or self.color, end=end, file=self.file)
@override
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
color (Optional[str], optional): The color to use for the text.
Defaults to None.
**kwargs (Any): Additional keyword arguments.
"""
print_text(finish.log, color=color or self.color, end="\n", file=self.file)
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Image(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for images.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
        dtype (torch.dtype, optional): Desired data type of the image. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the image. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the image. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
_ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
_ImageTypeJIT = torch.Tensor
_TensorImageType = Union[torch.Tensor, Image]
_TensorImageTypeJIT = torch.Tensor
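# A minimal sketch of the dimensionality rule enforced in `__new__` above:
# a 2D (H, W) tensor is promoted to (1, H, W) so downstream image ops can
# always assume a leading channel axis (the sizes here are illustrative).
def _demo_channel_promotion():
    t = torch.zeros(4, 4)
    t = t.unsqueeze(0) if t.ndim == 2 else t
    assert t.shape == (1, 4, 4)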
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Image(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for images.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
        dtype (torch.dtype, optional): Desired data type of the image. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the image. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the image. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return cls._wrap(tensor)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
_ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
_ImageTypeJIT = torch.Tensor
_TensorImageType = Union[torch.Tensor, Image]
_TensorImageTypeJIT = torch.Tensor
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.conv_to_res = conv_to_res
if conv_to_res:
assert self.conv_kernel_size == 3
self.num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
self.num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
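# A minimal mmdet-style config sketch for the head above (all values are
# illustrative): with conv_to_res=True, each pair of 3x3 convs is folded
# into one SimplifiedBasicBlock, so num_convs=4 yields num_res_blocks=2.
_example_mask_head_cfg = dict(
    type='SCNetMaskHead',
    conv_to_res=True,
    num_convs=4,
    in_channels=256,
    conv_out_channels=256,
    conv_kernel_size=3,
    num_classes=80)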
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetMaskHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if conv_to_res:
assert self.conv_kernel_size == 3
self.num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
self.num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
|
import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
import numpy as np
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
    def tokenize(self, texts: List[str], **kwargs) -> Dict[str, Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = np.zeros(self.get_sentence_embedding_dimension(), dtype=np.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.tensor(vectors, dtype=torch.float)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
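# A minimal NumPy sketch of the accumulation in `get_sentence_features`
# above (weights and token ids are illustrative): token ids index per-word
# weights, and with cumulative_term_frequency=True repeated tokens add up.
def _demo_cumulative_tf():
    weights = [0.5, 2.0, 1.0]
    tokens = [1, 1, 2]
    vector = np.zeros(3, dtype=np.float32)
    for token in tokens:
        vector[token] += weights[token]
    assert vector.tolist() == [0.0, 4.0, 1.0]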
|
import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
import numpy as np
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
    def tokenize(self, texts: List[str]) -> Dict[str, Tensor]:
tokenized = [self.tokenizer.tokenize(text) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = np.zeros(self.get_sentence_embedding_dimension(), dtype=np.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.tensor(vectors, dtype=torch.float)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
import argparse
import logging
from typing import Optional
import torch
import torchaudio
from torchaudio.prototype.ctc_decoder import lexicon_decoder
logger = logging.getLogger(__name__)
def _download_files(lexicon_file, kenlm_file):
torch.hub.download_url_to_file(
"https://pytorch.s3.amazonaws.com/torchaudio/tutorial-assets/ctc-decoding/lexicon-librispeech.txt", lexicon_file
)
torch.hub.download_url_to_file(
"https://pytorch.s3.amazonaws.com/torchaudio/tutorial-assets/ctc-decoding/4-gram-librispeech.bin", kenlm_file
)
def run_inference(args):
# get pretrained wav2vec2.0 model
bundle = getattr(torchaudio.pipelines, args.model)
model = bundle.get_model()
tokens = [label.lower() for label in bundle.get_labels()]
# get decoder files
hub_dir = torch.hub.get_dir()
lexicon_file = f"{hub_dir}/lexicon.txt"
kenlm_file = f"{hub_dir}/kenlm.bin"
_download_files(lexicon_file, kenlm_file)
decoder = lexicon_decoder(
lexicon=lexicon_file,
tokens=tokens,
lm=kenlm_file,
nbest=args.nbest,
beam_size=args.beam_size,
beam_size_token=args.beam_size_token,
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_score,
sil_score=args.sil_score,
log_add=False,
)
dataset = torchaudio.datasets.LIBRISPEECH(args.librispeech_path, url=args.split, download=False)
total_edit_distance = 0
total_length = 0
for idx, sample in enumerate(dataset):
waveform, _, transcript, _, _, _ = sample
transcript = transcript.strip().lower().strip()
with torch.inference_mode():
emission, _ = model(waveform)
results = decoder(emission)
total_edit_distance += torchaudio.functional.edit_distance(transcript.split(), results[0][0].words)
total_length += len(transcript.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER: {total_edit_distance / total_length}")
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--librispeech_path",
type=str,
help="folder where LibriSpeech is stored",
)
parser.add_argument(
"--split",
type=str,
help="LibriSpeech dataset split",
choices=["dev-clean", "dev-other", "test-clean", "test-other"],
default="test-other",
)
parser.add_argument(
"--model",
type=str,
default="WAV2VEC2_ASR_BASE_960H",
help="pretrained Wav2Vec2 model from torchaudio.pipelines",
)
parser.add_argument("--nbest", type=int, default=1, help="number of best hypotheses to return")
parser.add_argument(
"--beam-size", type=int, default=500, help="beam size for determining number of hypotheses to store"
)
parser.add_argument(
"--beam-size-token",
        type=int,
default=None,
help="number of tokens to consider at each beam search step",
)
parser.add_argument("--beam-threshold", type=int, default=50, help="beam threshold for pruning hypotheses")
parser.add_argument(
"--lm-weight",
type=float,
default=1.74,
help="languge model weight",
)
parser.add_argument(
"--word-score",
type=float,
default=0.52,
help="word insertion score",
)
parser.add_argument("--unk_score", type=float, default=float("-inf"), help="unknown word insertion score")
parser.add_argument("--sil_score", type=float, default=0, help="silence insertion score")
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def _main():
args = _parse_args()
_init_logger(args.debug)
run_inference(args)
if __name__ == "__main__":
_main()
|
import argparse
import logging
from typing import Optional
import torch
import torchaudio
from torchaudio.prototype.ctc_decoder import lexicon_decoder
logger = logging.getLogger(__name__)
def _download_files(lexicon_file, kenlm_file):
torch.hub.download_url_to_file(
"https://pytorch.s3.amazonaws.com/torchaudio/tutorial-assets/ctc-decoding/lexicon-librispeech.txt", lexicon_file
)
torch.hub.download_url_to_file(
"https://pytorch.s3.amazonaws.com/torchaudio/tutorial-assets/ctc-decoding/4-gram-librispeech.bin", kenlm_file
)
def run_inference(args):
# get pretrained wav2vec2.0 model
bundle = getattr(torchaudio.pipelines, args.model)
model = bundle.get_model()
tokens = [label.lower() for label in bundle.get_labels()]
# get decoder files
hub_dir = torch.hub.get_dir()
lexicon_file = f"{hub_dir}/lexicon.txt"
kenlm_file = f"{hub_dir}/kenlm.bin"
_download_files(lexicon_file, kenlm_file)
decoder = lexicon_decoder(
lexicon=lexicon_file,
tokens=tokens,
lm=kenlm_file,
nbest=1,
beam_size=1500,
beam_size_token=None,
beam_threshold=50,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=float("-inf"),
sil_score=0,
log_add=False,
)
dataset = torchaudio.datasets.LIBRISPEECH(args.librispeech_path, url=args.split, download=False)
total_edit_distance = 0
total_length = 0
for idx, sample in enumerate(dataset):
waveform, _, transcript, _, _, _ = sample
        transcript = transcript.strip().lower()
with torch.inference_mode():
emission, _ = model(waveform)
results = decoder(emission)
total_edit_distance += torchaudio.functional.edit_distance(transcript.split(), results[0][0].words)
total_length += len(transcript.split())
if idx % 100 == 0:
logger.info(f"Processed elem {idx}; WER: {total_edit_distance / total_length}")
logger.info(f"Final WER: {total_edit_distance / total_length}")
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--librispeech_path",
type=str,
help="folder where LibriSpeech is stored",
)
parser.add_argument(
"--split",
type=str,
help="LibriSpeech dataset split",
choices=["dev-clean", "dev-other", "test-clean", "test-other"],
default="test-other",
)
parser.add_argument(
"--model",
type=str,
default="WAV2VEC2_ASR_BASE_960H",
help="pretrained Wav2Vec2 model from torchaudio.pipelines",
)
parser.add_argument("--nbest", type=int, default=1, help="number of best hypotheses to return")
parser.add_argument(
"--beam-size", type=int, default=500, help="beam size for determining number of hypotheses to store"
)
parser.add_argument(
"--beam-size-token",
        type=int,
default=None,
help="number of tokens to consider at each beam search step",
)
parser.add_argument("--beam-threshold", type=int, default=50, help="beam threshold for pruning hypotheses")
parser.add_argument(
"--lm-weight",
type=float,
default=1.74,
help="languge model weight",
)
parser.add_argument(
"--word-score",
type=float,
default=0.52,
help="word insertion score",
)
parser.add_argument("--unk_score", type=float, default=float("-inf"), help="unknown word insertion score")
parser.add_argument("--sil_score", type=float, default=0, help="silence insertion score")
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def _init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def _main():
args = _parse_args()
_init_logger(args.debug)
run_inference(args)
if __name__ == "__main__":
_main()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
            # The product num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": None,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class CogVideoX1_5TransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
            # The product num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"use_rotary_positional_embeddings": True,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
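# Hedged aside: both init dicts above keep num_attention_heads * attention_head_dim
# divisible by 16, as the inline comments require for 3D positional embeddings.
_num_attention_heads, _attention_head_dim = 2, 8
assert (_num_attention_heads * _attention_head_dim) % 16 == 0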
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
            # The product num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, LOOPS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS',
'DefaultScope'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, EVALUATORS, HOOKS, LOOPS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISUALIZERS, WEIGHT_INITIALIZERS, WRITERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'EVALUATORS', 'MODEL_WRAPPERS', 'LOOPS', 'WRITERS', 'VISUALIZERS'
]
|
"""
This script contains an example of how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is to install the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_seismic
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Seismic
results, search_time, corpus_index = semantic_search_seismic(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is to install the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_seismic
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using Seismic
results, search_time, corpus_index = semantic_search_seismic(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from __future__ import annotations
import collections
import json
import logging
import os
import string
from typing import Iterable
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed in e.g. the Google News word2vec, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str, **kwargs) -> list[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
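# Hedged aside: a minimal standalone sketch of the phrase-merge loop in
# tokenize() above. Adjacent tokens that form a known ngram (joined by the
# separator) collapse into a single vocab token; the vocab is illustrative.
def _demo_phrase_merge():
    ngram_lookup = {"New_York"}
    tokens = ["New", "York", "is", "large"]
    idx, ngram_len = 0, 2
    while idx <= len(tokens) - ngram_len:
        ngram = "_".join(tokens[idx : idx + ngram_len])
        if ngram in ngram_lookup:
            tokens[idx : idx + ngram_len] = [ngram]
        idx += 1
    print(tokens)  # ['New_York', 'is', 'large']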
|
import collections
import json
import logging
import os
import string
from typing import Iterable, List
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed in e.g. the Google News word2vec, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info("PhraseTokenizer - Phrase ngram lengths: {}".format(self.ngram_lengths))
logger.info("PhraseTokenizer - Num phrases: {}".format(len(self.ngram_lookup)))
def tokenize(self, text: str, **kwargs) -> List[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(Formatter[dict, "tf.Tensor", dict]):
def __init__(self, features=None, decoded=True, **tf_tensor_kwargs):
super().__init__(features=features, decoded=decoded)
self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: import tf at initialization
def _consolidate(self, column):
import tensorflow as tf
if isinstance(column, list) and column:
if all(
isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return tf.stack(column)
elif all(
isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
for x in column
):
# only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
return tf.ragged.stack(column)
return column
def _tensorize(self, value):
import tensorflow as tf
if value is None:
return value
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": tf.int64}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": tf.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> dict:
row = self.numpy_arrow_extractor().extract_row(pa_table)
if self.decoded:
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
column = self.numpy_arrow_extractor().extract_column(pa_table)
if self.decoded:
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
if self.decoded:
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
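# Hedged aside: a small sketch of the _consolidate() behavior above.
# Equal-shape tensors stack densely; 1-D tensors of unequal length are
# stacked into a RaggedTensor instead.
def _demo_consolidate():
    import tensorflow as tf

    dense = [tf.constant([1, 2]), tf.constant([3, 4])]
    print(tf.stack(dense).shape)  # (2, 2)
    ragged = [tf.constant([1]), tf.constant([2, 3])]
    print(tf.ragged.stack(ragged).shape)  # (2, None)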
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import tensorflow as tf
class TFFormatter(Formatter[dict, "tf.Tensor", dict]):
    def __init__(self, features=None, decoded=True, **tf_tensor_kwargs):
        super().__init__(features=features, decoded=decoded)
        self.tf_tensor_kwargs = tf_tensor_kwargs
import tensorflow as tf # noqa: import tf at initialization
def _tensorize(self, value):
import tensorflow as tf
if "dtype" not in self.tf_tensor_kwargs:
if np.issubdtype(value.dtype, np.integer):
np_dtype = np.int64
tf_dtype = tf.int64
default_dtype = {"dtype": tf_dtype}
elif np.issubdtype(value.dtype, np.floating):
np_dtype = np.float32
tf_dtype = tf.float32
default_dtype = {"dtype": tf_dtype}
else:
np_dtype = None
tf_dtype = None
default_dtype = {}
else:
tf_dtype = self.tf_tensor_kwargs["dtype"]
np_dtype = tf_dtype.as_numpy_dtype
default_dtype = {}
# Saving the most expensive methods for last
try:
return tf.convert_to_tensor(value, dtype=tf_dtype)
except ValueError:
try:
return tf.ragged.stack([np.array(subarr, dtype=np_dtype) for subarr in value])
except ValueError:
# tf.ragged.constant is orders of magnitude slower than tf.ragged.stack
return tf.ragged.constant(value, **{**default_dtype, **self.tf_tensor_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
        if isinstance(data_struct, (list, np.ndarray)):
            data_struct = np.array(data_struct, copy=False)  # plain lists have no .dtype; normalize first
            if data_struct.dtype == object:  # tensorflow tensors can sometimes be instantiated from an array of objects
try:
return self._tensorize(data_struct)
except ValueError:
return [self.recursive_tensorize(substruct) for substruct in data_struct]
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> dict:
row = self.numpy_arrow_extractor().extract_row(pa_table)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
col = self.numpy_arrow_extractor().extract_column(pa_table)
return self.recursive_tensorize(col)
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
return self.recursive_tensorize(batch)
|
import importlib.util
import warnings
from functools import wraps
from typing import Optional
import torch
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = (
f"{func.__module__}.{func.__name__} has been deprecated "
f'and will be removed from {"future" if version is None else version} release. '
f"{direction}"
)
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def is_kaldi_available():
try:
import torchaudio.lib._torchaudio
return torchaudio.lib._torchaudio.is_kaldi_available()
except Exception:
return False
def requires_kaldi():
if is_kaldi_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires kaldi")
return wrapped
return decorator
def _check_soundfile_importable():
if not is_module_available("soundfile"):
return False
try:
import soundfile # noqa: F401
return True
except Exception:
warnings.warn("Failed to import soundfile. 'soundfile' backend is not available.")
return False
_is_soundfile_importable = _check_soundfile_importable()
def is_soundfile_available():
return _is_soundfile_importable
def requires_soundfile():
if is_soundfile_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires soundfile")
return wrapped
return decorator
def is_sox_available():
return is_module_available("torchaudio.lib._torchaudio_sox")
def requires_sox():
if is_sox_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires sox")
return wrapped
return decorator
|
import importlib.util
import warnings
from functools import wraps
from typing import Optional
import torch
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = (
f"{func.__module__}.{func.__name__} has been deprecated "
f'and will be removed from {"future" if version is None else version} release. '
f"{direction}"
)
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def is_kaldi_available():
try:
return torch.ops.torchaudio.is_kaldi_available()
except Exception:
return False
def requires_kaldi():
if is_kaldi_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires kaldi")
return wrapped
return decorator
def _check_soundfile_importable():
if not is_module_available("soundfile"):
return False
try:
import soundfile # noqa: F401
return True
except Exception:
warnings.warn("Failed to import soundfile. 'soundfile' backend is not available.")
return False
_is_soundfile_importable = _check_soundfile_importable()
def is_soundfile_available():
return _is_soundfile_importable
def requires_soundfile():
if is_soundfile_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires soundfile")
return wrapped
return decorator
def is_sox_available():
return is_module_available("torchaudio.lib._torchaudio_sox")
def requires_sox():
if is_sox_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires sox")
return wrapped
return decorator
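# Hedged usage sketch for requires_module above: decorating with a module that
# is not installed turns the call into a descriptive RuntimeError
# ("some_missing_lib" is an illustrative, presumably absent module name).
def _demo_requires_module():
    @requires_module("some_missing_lib")
    def needs_missing_lib():
        pass

    try:
        needs_missing_lib()
    except RuntimeError as err:
        print(err)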
|
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from .custom_blocks import CustomBlocksCommand
from .env import EnvironmentCommand
from .fp16_safetensors import FP16SafetensorsCommand
def main():
parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
# Register commands
EnvironmentCommand.register_subcommand(commands_parser)
FP16SafetensorsCommand.register_subcommand(commands_parser)
CustomBlocksCommand.register_subcommand(commands_parser)
# Let's go
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
# Run
service = args.func(args)
service.run()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from .env import EnvironmentCommand
from .fp16_safetensors import FP16SafetensorsCommand
def main():
parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
# Register commands
EnvironmentCommand.register_subcommand(commands_parser)
FP16SafetensorsCommand.register_subcommand(commands_parser)
# Let's go
args = parser.parse_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
# Run
service = args.func(args)
service.run()
if __name__ == "__main__":
main()
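# Hedged aside: a self-contained sketch of the register_subcommand pattern used
# above; the "hello" command and class are illustrative, not part of diffusers.
def _demo_subcommand_pattern():
    class HelloCommand:
        @staticmethod
        def register_subcommand(subparsers):
            p = subparsers.add_parser("hello")
            p.set_defaults(func=lambda args: HelloCommand())

        def run(self):
            print("hello")

    parser = ArgumentParser("demo-cli")
    sub = parser.add_subparsers()
    HelloCommand.register_subcommand(sub)
    args = parser.parse_args(["hello"])
    args.func(args).run()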
|
import types
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
__all__ = []
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
import types
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
__all__ = []
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
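# Hedged aside: the module above implements PEP 562 lazy imports via a
# module-level __getattr__. A minimal standalone sketch of the same dispatch,
# using the stdlib "math" module as an illustrative backend.
def _demo_lazy_get():
    import importlib

    registry = {"sqrt": "math", "ceil": "math"}

    def lazy_get(name: str):
        if name not in registry:
            raise ImportError(f"cannot import name '{name}'")
        return getattr(importlib.import_module(registry[name]), name)

    assert lazy_get("sqrt")(9.0) == 3.0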
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .torchscript_consistency_test_impl import TorchScriptConsistencyCPUOnlyTestImpl, TorchScriptConsistencyTestImpl
class TorchScriptConsistencyCPUFloat32Test(TorchScriptConsistencyTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TorchScriptConsistencyCPUFloat64Test(TorchScriptConsistencyTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
class TorchScriptConsistencyCPUOnlyFloat32Test(TorchScriptConsistencyCPUOnlyTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TorchScriptConsistencyCPUOnlyFloat64Test(TorchScriptConsistencyCPUOnlyTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .torchscript_consistency_test_impl import TorchScriptConsistencyTestImpl
class TorchScriptConsistencyCPUFloat32Test(TorchScriptConsistencyTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TorchScriptConsistencyCPUFloat64Test(TorchScriptConsistencyTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
|
"""Test program utils."""
import pytest
from typing import List, Optional
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole
from llama_index.core.program.utils import (
_repair_incomplete_json,
process_streaming_objects,
num_valid_fields,
create_flexible_model,
)
class Person(BaseModel):
name: str
age: Optional[int] = None
hobbies: List[str] = Field(default_factory=list)
def test_repair_incomplete_json() -> None:
"""Test JSON repair function."""
# Test adding missing quotes
assert _repair_incomplete_json('{"name": "John') == '{"name": "John"}'
# Test adding missing braces
assert _repair_incomplete_json('{"name": "John"') == '{"name": "John"}'
# Test empty string
assert _repair_incomplete_json("") == "{}"
# Test already valid JSON
valid_json = '{"name": "John", "age": 30}'
assert _repair_incomplete_json(valid_json) == valid_json
def test_process_streaming_objects() -> None:
"""Test processing streaming objects."""
# Test processing complete object
response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content='{"name": "John", "age": 30}',
)
)
result = process_streaming_objects(response, Person)
assert isinstance(result, Person)
assert result.name == "John"
assert result.age == 30
# Test processing incomplete object
incomplete_response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content='{"name": "John", "age":',
)
)
# Should return empty object when can't parse
result = process_streaming_objects(incomplete_response, Person)
assert result.name is None # Default value
# Test with previous state
prev_obj = Person(name="John", age=25)
result = process_streaming_objects(
incomplete_response, Person, cur_objects=[prev_obj]
)
assert isinstance(result, Person)
assert result.name == "John"
assert result.age == 25 # Keeps previous state
# Test with tool calls
tool_call_response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "create_person",
"arguments": '{"name": "Jane", "age": 28}',
}
}
]
},
)
)
# Mock LLM for tool calls
class MockLLM:
def get_tool_calls_from_response(self, *args, **kwargs):
return [
type(
"ToolSelection",
(),
{"tool_kwargs": {"name": "Jane", "age": 28}},
)
]
result = process_streaming_objects(
tool_call_response,
Person,
llm=MockLLM(), # type: ignore
)
assert isinstance(result, Person)
assert result.name == "Jane"
assert result.age == 28
def test_num_valid_fields() -> None:
"""Test counting valid fields."""
# Test simple object
person = Person(name="John", age=None, hobbies=[])
assert num_valid_fields(person) == 1 # Only name is non-None
# Test with more fields
person = Person(name="John", age=30, hobbies=["reading"])
assert num_valid_fields(person) == 3 # All fields are non-None
# Test list of objects
people = [
Person(name="John", age=30),
Person(name="Jane", hobbies=["reading"]),
]
assert num_valid_fields(people) == 4 # 2 names + 1 age + 1 hobby list
# Test nested object
class Family(BaseModel):
parent: Person
children: List[Person] = []
family = Family(
parent=Person(name="John", age=40),
children=[Person(name="Jane", age=10)],
)
assert num_valid_fields(family) == 4 # parent's name & age + child's name & age
def test_create_flexible_model() -> None:
"""Test creating flexible model."""
FlexiblePerson = create_flexible_model(Person)
# Should accept partial data
flexible_person = FlexiblePerson(name="John")
assert flexible_person.name == "John"
assert flexible_person.age is None
# Should accept extra fields
flexible_person = FlexiblePerson(
name="John", extra_field="value", another_field=123
)
assert flexible_person.name == "John"
assert hasattr(flexible_person, "extra_field")
assert flexible_person.extra_field == "value"
# Original model should still be strict
with pytest.raises(ValueError):
Person(name=None) # type: ignore
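# Hedged aside: a minimal re-implementation consistent with the expectations in
# test_repair_incomplete_json() above; it is a sketch, not the library's code.
def _repair_incomplete_json_sketch(s: str) -> str:
    import json

    if not s:
        return "{}"
    try:
        json.loads(s)
        return s  # already valid
    except json.JSONDecodeError:
        pass
    if s.count('"') % 2 == 1:
        s += '"'  # close a dangling string
    s += "}" * (s.count("{") - s.count("}"))  # balance braces
    return s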
|
"""Test program utils."""
import pytest
from typing import List, Optional
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole
from llama_index.core.program.utils import (
_repair_incomplete_json,
process_streaming_objects,
num_valid_fields,
create_flexible_model,
)
class Person(BaseModel):
name: str
age: Optional[int] = None
hobbies: List[str] = Field(default_factory=list)
def test_repair_incomplete_json() -> None:
"""Test JSON repair function."""
# Test adding missing quotes
assert _repair_incomplete_json('{"name": "John') == '{"name": "John"}'
# Test adding missing braces
assert _repair_incomplete_json('{"name": "John"') == '{"name": "John"}'
# Test empty string
assert _repair_incomplete_json("") == "{}"
# Test already valid JSON
valid_json = '{"name": "John", "age": 30}'
assert _repair_incomplete_json(valid_json) == valid_json
def test_process_streaming_objects() -> None:
"""Test processing streaming objects."""
# Test processing complete object
response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content='{"name": "John", "age": 30}',
)
)
result = process_streaming_objects(response, Person)
assert isinstance(result, Person)
assert result.name == "John"
assert result.age == 30
# Test processing incomplete object
incomplete_response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content='{"name": "John", "age":',
)
)
# Should return empty object when can't parse
result = process_streaming_objects(incomplete_response, Person)
assert result.name is None # Default value
# Test with previous state
prev_obj = Person(name="John", age=25)
result = process_streaming_objects(
incomplete_response, Person, cur_objects=[prev_obj]
)
assert isinstance(result, Person)
assert result.name == "John"
assert result.age == 25 # Keeps previous state
# Test with tool calls
tool_call_response = ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
content="",
additional_kwargs={
"tool_calls": [
{
"function": {
"name": "create_person",
"arguments": '{"name": "Jane", "age": 28}',
}
}
]
},
)
)
# Mock LLM for tool calls
class MockLLM:
def get_tool_calls_from_response(self, *args, **kwargs):
return [
type(
"ToolSelection",
(),
{"tool_kwargs": {"name": "Jane", "age": 28}},
)
]
result = process_streaming_objects(
tool_call_response, Person, llm=MockLLM() # type: ignore
)
assert isinstance(result, Person)
assert result.name == "Jane"
assert result.age == 28
def test_num_valid_fields() -> None:
"""Test counting valid fields."""
# Test simple object
person = Person(name="John", age=None, hobbies=[])
assert num_valid_fields(person) == 1 # Only name is non-None
# Test with more fields
person = Person(name="John", age=30, hobbies=["reading"])
assert num_valid_fields(person) == 3 # All fields are non-None
# Test list of objects
people = [
Person(name="John", age=30),
Person(name="Jane", hobbies=["reading"]),
]
assert num_valid_fields(people) == 4 # 2 names + 1 age + 1 hobby list
# Test nested object
class Family(BaseModel):
parent: Person
children: List[Person] = []
family = Family(
parent=Person(name="John", age=40),
children=[Person(name="Jane", age=10)],
)
assert num_valid_fields(family) == 4 # parent's name & age + child's name & age
def test_create_flexible_model() -> None:
"""Test creating flexible model."""
FlexiblePerson = create_flexible_model(Person)
# Should accept partial data
flexible_person = FlexiblePerson(name="John")
assert flexible_person.name == "John"
assert flexible_person.age is None
# Should accept extra fields
flexible_person = FlexiblePerson(
name="John", extra_field="value", another_field=123
)
assert flexible_person.name == "John"
assert hasattr(flexible_person, "extra_field")
assert flexible_person.extra_field == "value"
# Original model should still be strict
with pytest.raises(ValueError):
Person(name=None) # type: ignore
|
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: Runnable
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_schema.model_json_schema()["required"]
@property
def output_keys(self) -> List[str]:
"""Output keys for Hyde's LLM chain."""
if isinstance(self.llm_chain, LLMChain):
return self.llm_chain.output_keys
else:
return ["text"]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
"""Combine embeddings into final embeddings."""
try:
import numpy as np
return list(np.array(embeddings).mean(axis=0))
except ImportError:
logger.warning(
"NumPy not found in the current Python environment. "
"HypotheticalDocumentEmbedder will use a pure Python implementation "
"for internal calculations, which may significantly impact "
"performance, especially for large datasets. For optimal speed and "
"efficiency, consider installing NumPy: pip install numpy"
)
if not embeddings:
return []
num_vectors = len(embeddings)
return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.input_keys[0]
result = self.llm_chain.invoke({var_name: text})
if isinstance(self.llm_chain, LLMChain):
documents = [result[self.output_keys[0]]]
else:
documents = [result]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain.invoke(
inputs, config={"callbacks": _run_manager.get_child()}
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = prompt | llm | StrOutputParser()
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
|
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: Runnable
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_schema.model_json_schema()["required"]
@property
def output_keys(self) -> List[str]:
"""Output keys for Hyde's LLM chain."""
if isinstance(self.llm_chain, LLMChain):
return self.llm_chain.output_keys
else:
return ["text"]
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
"""Combine embeddings into final embeddings."""
return list(np.array(embeddings).mean(axis=0))
def embed_query(self, text: str) -> List[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.input_keys[0]
result = self.llm_chain.invoke({var_name: text})
if isinstance(self.llm_chain, LLMChain):
documents = [result[self.output_keys[0]]]
else:
documents = [result]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain.invoke(
inputs, config={"callbacks": _run_manager.get_child()}
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
raise ValueError(
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
llm_chain = prompt | llm | StrOutputParser()
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
|
import datetime
import prisma.fields
import prisma.models
import pytest
import backend.server.v2.library.model as library_model
@pytest.mark.asyncio
async def test_agent_preset_from_db():
# Create mock DB agent
db_agent = prisma.models.AgentPreset(
id="test-agent-123",
createdAt=datetime.datetime.now(),
updatedAt=datetime.datetime.now(),
agentGraphId="agent-123",
agentGraphVersion=1,
name="Test Agent",
description="Test agent description",
isActive=True,
userId="test-user-123",
isDeleted=False,
InputPresets=[
prisma.models.AgentNodeExecutionInputOutput.model_validate(
{
"id": "input-123",
"time": datetime.datetime.now(),
"name": "input1",
"data": '{"type": "string", "value": "test value"}',
}
)
],
)
# Convert to LibraryAgentPreset
agent = library_model.LibraryAgentPreset.from_db(db_agent)
assert agent.id == "test-agent-123"
assert agent.graph_version == 1
assert agent.is_active is True
assert agent.name == "Test Agent"
assert agent.description == "Test agent description"
assert agent.inputs == {"input1": {"type": "string", "value": "test value"}}
|
import datetime
import prisma.fields
import prisma.models
import pytest
import backend.server.v2.library.model as library_model
@pytest.mark.asyncio
async def test_agent_preset_from_db():
# Create mock DB agent
db_agent = prisma.models.AgentPreset(
id="test-agent-123",
createdAt=datetime.datetime.now(),
updatedAt=datetime.datetime.now(),
agentGraphId="agent-123",
agentGraphVersion=1,
name="Test Agent",
description="Test agent description",
isActive=True,
userId="test-user-123",
isDeleted=False,
InputPresets=[
prisma.models.AgentNodeExecutionInputOutput(
id="input-123",
time=datetime.datetime.now(),
name="input1",
data=prisma.Json({"type": "string", "value": "test value"}),
)
],
)
# Convert to LibraryAgentPreset
agent = library_model.LibraryAgentPreset.from_db(db_agent)
assert agent.id == "test-agent-123"
assert agent.graph_version == 1
assert agent.is_active is True
assert agent.name == "Test Agent"
assert agent.description == "Test agent description"
assert agent.inputs == {"input1": {"type": "string", "value": "test value"}}
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps, get_box_tensor
def cast_tensor_type(x, scale=1., dtype=None):
if dtype == 'fp16':
# scale is for preventing overflows
x = (x / scale).half()
return x
@TASK_UTILS.register_module()
class BboxOverlaps2D:
"""2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
def __init__(self, scale=1., dtype=None):
self.scale = scale
self.dtype = dtype
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes.
Args:
bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)
in <x1, y1, x2, y2> format, or shape (m, 5) in <x1, y1, x2,
y2, score> format.
bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)
in <x1, y1, x2, y2> format, shape (m, 5) in <x1, y1, x2, y2,
                score> format, or be empty. If ``is_aligned`` is ``True``,
then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection
over foreground), or "giou" (generalized intersection over
union).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
bboxes1 = get_box_tensor(bboxes1)
bboxes2 = get_box_tensor(bboxes2)
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., :4]
if bboxes1.size(-1) == 5:
bboxes1 = bboxes1[..., :4]
if self.dtype == 'fp16':
# change tensor type to save cpu and cuda memory and keep speed
bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
if not overlaps.is_cuda and overlaps.dtype == torch.float16:
# resume cpu float32
overlaps = overlaps.float()
return overlaps
return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
def __repr__(self):
"""str: a string describing the module"""
repr_str = self.__class__.__name__ + f'(' \
f'scale={self.scale}, dtype={self.dtype})'
return repr_str
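# A minimal sketch of invoking the calculator directly (toy boxes, made up for
# illustration); not part of the original module.
if __name__ == '__main__':
    boxes1 = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    boxes2 = torch.tensor([[0., 0., 10., 10.]])
    iou_calculator = BboxOverlaps2D()
    # Pairwise IoUs with shape (2, 1); mode='giou' or mode='iof' also work.
    print(iou_calculator(boxes1, boxes2, mode='iou'))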
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.models.utils.misc import get_box_tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps
def cast_tensor_type(x, scale=1., dtype=None):
if dtype == 'fp16':
# scale is for preventing overflows
x = (x / scale).half()
return x
@TASK_UTILS.register_module()
class BboxOverlaps2D:
"""2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
def __init__(self, scale=1., dtype=None):
self.scale = scale
self.dtype = dtype
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes.
Args:
bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)
in <x1, y1, x2, y2> format, or shape (m, 5) in <x1, y1, x2,
y2, score> format.
bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)
in <x1, y1, x2, y2> format, shape (m, 5) in <x1, y1, x2, y2,
                score> format, or be empty. If ``is_aligned`` is ``True``,
then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection
over foreground), or "giou" (generalized intersection over
union).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
bboxes1 = get_box_tensor(bboxes1)
bboxes2 = get_box_tensor(bboxes2)
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., :4]
if bboxes1.size(-1) == 5:
bboxes1 = bboxes1[..., :4]
if self.dtype == 'fp16':
# change tensor type to save cpu and cuda memory and keep speed
bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
if not overlaps.is_cuda and overlaps.dtype == torch.float16:
# resume cpu float32
overlaps = overlaps.float()
return overlaps
return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
def __repr__(self):
"""str: a string describing the module"""
repr_str = self.__class__.__name__ + f'(' \
f'scale={self.scale}, dtype={self.dtype})'
return repr_str
|
# pants requires this import to recognize the dep
import pytest_asyncio # noqa: F401
import pytest
import os
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal as Interface
from llama_index.multi_modal_llms.nvidia.utils import DEFAULT_MODEL
from typing import Generator
# This fixture masks the NVIDIA_API_KEY environment variable and restores it
# after the test. It also yields the value of the NVIDIA_API_KEY environment
# variable before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield val
finally:
if val:
os.environ[var] = val
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable"
)
for item in items:
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
if "vlm_model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in Interface().available_models]
metafunc.parametrize("vlm_model", models, ids=models)
|
import pytest
import os
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal as Interface
from llama_index.multi_modal_llms.nvidia.utils import DEFAULT_MODEL
from typing import Generator
# This fixture masks the NVIDIA_API_KEY environment variable and restores it
# after the test. It also yields the value of the NVIDIA_API_KEY environment
# variable before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield val
finally:
if val:
os.environ[var] = val
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable"
)
for item in items:
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
if "vlm_model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in Interface().available_models]
metafunc.parametrize("vlm_model", models, ids=models)
|
import os
import tempfile
import httpx
import pytest
from PIL import Image
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.embeddings.cohere.base import VALID_MODEL_INPUT_TYPES
def test_embedding_class():
emb = CohereEmbedding(api_key="token")
assert isinstance(emb, BaseEmbedding)
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_sync_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_client=httpx.Client(),
)
emb.get_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_async_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_async_client=httpx.AsyncClient(),
)
await emb.aget_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_v4_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
embeddings = await emb.aget_text_embedding("I love Cohere!")
assert len(embeddings) > 0
embeddings2 = emb.get_text_embedding("I love Cohere!")
assert len(embeddings2) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_batch():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
embeddings = await emb.aget_text_embedding_batch(
["I love Cohere!", "I love Cohere!"]
)
assert len(embeddings) == 2
assert len(embeddings[0]) > 0
assert len(embeddings[1]) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_image():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
# create a test image in a temp file
image = Image.new("RGB", (100, 100), color="red")
with tempfile.NamedTemporaryFile(suffix=".png") as f:
image.save(f.name)
embedding = await emb.aget_image_embedding(f.name)
embedding2 = emb.get_image_embedding(f.name)
assert len(embedding) > 0
assert len(embedding2) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_image_batch():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
# create a test image in a temp file
image = Image.new("RGB", (100, 100), color="red")
with tempfile.NamedTemporaryFile(suffix=".png") as f:
image.save(f.name)
embeddings = await emb.aget_image_embedding_batch([f.name, f.name])
embeddings2 = emb.get_image_embedding_batch([f.name, f.name])
assert len(embeddings) == 2
assert len(embeddings[0]) > 0
assert len(embeddings[1]) > 0
assert len(embeddings2) == 2
assert len(embeddings2[0]) > 0
assert len(embeddings2[1]) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_all_model_names():
for model_name in VALID_MODEL_INPUT_TYPES:
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name=model_name,
)
embedding = emb.get_text_embedding("Hello, world!")
assert len(embedding) > 0
def test_cohere_embeddings_custom_endpoint_multiprocessing():
"""
    When used in multiprocessing, the CohereEmbedding instance will be serialized and deserialized. This test
    verifies that custom base_urls are retained in the spawned processes.
    """
    # Arrange: Create a CohereEmbedding instance with a custom base_url
custom_base_url = "test_endpoint"
api_key = "test_api_key"
embeddings = CohereEmbedding(api_key=api_key, base_url=custom_base_url)
# Act: Simulate serialization and deserialization
serialized_data = embeddings.__getstate__()
deserialized_embeddings = CohereEmbedding.__new__(CohereEmbedding)
deserialized_embeddings.__setstate__(serialized_data)
# Assert: Verify that the deserialized instance retains the correct base_url
assert deserialized_embeddings.base_url == custom_base_url
|
import os
import tempfile
import httpx
import pytest
from PIL import Image
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.cohere import CohereEmbedding
from llama_index.embeddings.cohere.base import VALID_MODEL_INPUT_TYPES
def test_embedding_class():
emb = CohereEmbedding(api_key="token")
assert isinstance(emb, BaseEmbedding)
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_sync_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_client=httpx.Client(),
)
emb.get_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_async_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-english-v3.0",
input_type="clustering",
embedding_type="float",
httpx_async_client=httpx.AsyncClient(),
)
await emb.aget_query_embedding("I love Cohere!")
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_v4_embedding():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
embeddings = await emb.aget_text_embedding("I love Cohere!")
assert len(embeddings) > 0
embeddings2 = emb.get_text_embedding("I love Cohere!")
assert len(embeddings2) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_batch():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
embeddings = await emb.aget_text_embedding_batch(["I love Cohere!", "I love Cohere!"])
assert len(embeddings) == 2
assert len(embeddings[0]) > 0
assert len(embeddings[1]) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_image():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
# create a test image in a temp file
image = Image.new("RGB", (100, 100), color="red")
with tempfile.NamedTemporaryFile(suffix=".png") as f:
image.save(f.name)
embedding = await emb.aget_image_embedding(f.name)
embedding2 = emb.get_image_embedding(f.name)
assert len(embedding) > 0
assert len(embedding2) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
@pytest.mark.asyncio
async def test_embed_image_batch():
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name="embed-v4.0",
)
# create a test image in a temp file
image = Image.new("RGB", (100, 100), color="red")
with tempfile.NamedTemporaryFile(suffix=".png") as f:
image.save(f.name)
embeddings = await emb.aget_image_embedding_batch([f.name, f.name])
embeddings2 = emb.get_image_embedding_batch([f.name, f.name])
assert len(embeddings) == 2
assert len(embeddings[0]) > 0
assert len(embeddings[1]) > 0
assert len(embeddings2) == 2
assert len(embeddings2[0]) > 0
assert len(embeddings2[1]) > 0
@pytest.mark.skipif(
os.environ.get("CO_API_KEY") is None, reason="Cohere API key required"
)
def test_all_model_names():
for model_name in VALID_MODEL_INPUT_TYPES:
emb = CohereEmbedding(
api_key=os.environ["CO_API_KEY"],
model_name=model_name,
)
embedding = emb.get_text_embedding("Hello, world!")
assert len(embedding) > 0
def test_cohere_embeddings_custom_endpoint_multiprocessing():
"""
    When used in multiprocessing, the CohereEmbedding instance will be serialized and deserialized. This test
    verifies that custom base_urls are retained in the spawned processes.
    """
    # Arrange: Create a CohereEmbedding instance with a custom base_url
custom_base_url = "test_endpoint"
api_key = "test_api_key"
embeddings = CohereEmbedding(api_key=api_key, base_url=custom_base_url)
# Act: Simulate serialization and deserialization
serialized_data = embeddings.__getstate__()
deserialized_embeddings = CohereEmbedding.__new__(CohereEmbedding)
deserialized_embeddings.__setstate__(serialized_data)
# Assert: Verify that the deserialized instance retains the correct base_url
assert deserialized_embeddings.base_url == custom_base_url
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
file_client_args=file_client_args,
)
test_evaluator = val_evaluator
# To run inference on the test dataset and format the output results
# for submission, uncomment the following:
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', file_client_args=file_client_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
file_client_args=file_client_args,
)
test_evaluator = val_evaluator
# To run inference on the test dataset and format the output results
# for submission, uncomment the following:
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/', seg=None),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
from langchain_core.callbacks import __all__
EXPECTED_ALL = [
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
"adispatch_custom_event",
"dispatch_custom_event",
"UsageMetadataCallbackHandler",
"get_usage_metadata_callback",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
from langchain_core.callbacks import __all__
EXPECTED_ALL = [
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
"adispatch_custom_event",
"dispatch_custom_event",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: List[str], word_weights: Dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
            word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
                by this float value. The keys of word_weights need not match the vocab exactly (it may contain more or
                fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super(WordWeights, self).__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
"{} of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: Dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
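# A minimal sketch of WordWeights in isolation (toy vocab and idf-style weights
# made up for illustration); not part of the original module.
if __name__ == "__main__":
    module = WordWeights(
        vocab=["[PAD]", "hello", "world"],
        word_weights={"hello": 1.5, "world": 0.5},
        unknown_word_weight=1.0,
    )
    features = {
        "input_ids": torch.tensor([[1, 2, 0]]),
        "attention_mask": torch.tensor([[1, 1, 0]]),
        "token_embeddings": torch.ones(1, 3, 4),
    }
    out = module(features)
    # Per-token scaling: 1.5 for "hello", 0.5 for "world"; the padding position
    # is zeroed out by the attention mask.
    print(out["token_embeddings"][0, :, 0], out["token_weights_sum"])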
|
import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: List[str], word_weights: Dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
            word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
                by this float value. The keys of word_weights need not match the vocab exactly (it may contain more or
                fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super(WordWeights, self).__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
"{} of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: Dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
|
import pytest
from backend.data import db
from backend.executor.scheduler import SchedulerClient
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(loop_scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
user_id=test_user.id,
)
scheduler = get_service_client(SchedulerClient)
schedules = await scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule = await scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule
schedules = await scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[0].cron == "0 0 * * *"
await scheduler.delete_schedule(schedule.id, user_id=test_user.id)
schedules = await scheduler.get_execution_schedules(
test_graph.id, user_id=test_user.id
)
assert len(schedules) == 0
|
import pytest
from backend.data import db
from backend.executor import Scheduler
from backend.server.model import CreateGraph
from backend.usecases.sample import create_test_graph, create_test_user
from backend.util.service import get_service_client
from backend.util.test import SpinTestServer
@pytest.mark.asyncio(loop_scope="session")
async def test_agent_schedule(server: SpinTestServer):
await db.connect()
test_user = await create_test_user()
test_graph = await server.agent_server.test_create_graph(
create_graph=CreateGraph(graph=create_test_graph()),
user_id=test_user.id,
)
scheduler = get_service_client(Scheduler)
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 0
schedule = scheduler.add_execution_schedule(
graph_id=test_graph.id,
user_id=test_user.id,
graph_version=1,
cron="0 0 * * *",
input_data={"input": "data"},
)
assert schedule
schedules = scheduler.get_execution_schedules(test_graph.id, test_user.id)
assert len(schedules) == 1
assert schedules[0].cron == "0 0 * * *"
scheduler.delete_schedule(schedule.id, user_id=test_user.id)
schedules = scheduler.get_execution_schedules(test_graph.id, user_id=test_user.id)
assert len(schedules) == 0
|
import types
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.embedding.jax_array import JaxArrayEmbedding # noqa
from docarray.typing.tensor.embedding.tensorflow import TensorFlowEmbedding # noqa
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'TorchEmbedding':
import_library('torch', raise_error=True)
import docarray.typing.tensor.embedding.torch as lib
elif name == 'TensorFlowEmbedding':
import_library('tensorflow', raise_error=True)
import docarray.typing.tensor.embedding.tensorflow as lib
elif name == 'JaxArrayEmbedding':
import_library('jax', raise_error=True)
import docarray.typing.tensor.embedding.jax_array as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
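# A short sketch of the lazy-import behaviour (assumes torch is installed): the
# attribute access below triggers this module-level __getattr__, which imports
# the torch backend on demand and appends the name to __all__.
#
#     from docarray.typing.tensor.embedding import TorchEmbedding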
|
import types
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.embedding.tensorflow import TensorFlowEmbedding # noqa
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'TorchEmbedding':
import_library('torch', raise_error=True)
import docarray.typing.tensor.embedding.torch as lib
elif name == 'TensorFlowEmbedding':
import_library('tensorflow', raise_error=True)
import docarray.typing.tensor.embedding.tensorflow as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
.. note::
    The feature is only supported using the Python, R, and C packages. In addition, quantile
    crossing can happen due to limitations in the algorithm.
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generation process follows the
    # sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
y = expected_y + noise
    # Train on the 0.05, 0.5, and 0.95 quantiles. The model is similar to multi-class
    # and multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    # We will be using the `hist` tree method; a quantile DMatrix can be used to
    # conserve memory (which has nothing to do with quantile regression itself, see
    # its documentation for details).
# Do not use the `exact` tree method for quantile regression, otherwise the
# performance might drop.
Xy = xgb.QuantileDMatrix(X_train, y_train)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
    # Train an MSE model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
.. note::
    The feature is only supported using the Python, R, and C packages. In addition, quantile
    crossing can happen due to limitations in the algorithm.
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generation process follows the
    # sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
y = expected_y + noise
    # Train on the 0.05, 0.5, and 0.95 quantiles. The model is similar to multi-class
    # and multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    # We will be using the `hist` tree method; a quantile DMatrix can be used to
    # conserve memory.
# Do not use the `exact` tree method for quantile regression, otherwise the
# performance might drop.
    Xy = xgb.QuantileDMatrix(X_train, y_train)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
    # Train an MSE model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil" # "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
        # Get top-k indices in the sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cpu",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
        # Get top-k indices in the sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
import importlib.machinery
import os
from torch.hub import _get_torch_home
_HOME = os.path.join(_get_torch_home(), "datasets", "vision")
_USE_SHARDED_DATASETS = False
IN_FBCODE = False
def _download_file_from_remote_location(fpath: str, url: str) -> None:
pass
def _is_remote_location_available() -> bool:
return False
try:
    from torch.hub import load_state_dict_from_url  # noqa: F401
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url  # noqa: F401
def _get_extension_path(lib_name):
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the main torchvision library location on the default DLL path
import ctypes
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
os.add_dll_directory(lib_dir)
kernel32.SetErrorMode(prev_error_mode)
loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(lib_name)
if ext_specs is None:
raise ImportError
return ext_specs.origin
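# A minimal sketch of how this helper might be used (the extension name "_C" is
# an assumption here, following the usual torchvision convention):
#
#     import torch
#     torch.ops.load_library(_get_extension_path("_C"))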
|
import importlib.machinery
import os
from torch.hub import _get_torch_home
_HOME = os.path.join(_get_torch_home(), "datasets", "vision")
_USE_SHARDED_DATASETS = False
def _download_file_from_remote_location(fpath: str, url: str) -> None:
pass
def _is_remote_location_available() -> bool:
return False
try:
    from torch.hub import load_state_dict_from_url  # noqa: F401
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url  # noqa: F401
def _get_extension_path(lib_name):
lib_dir = os.path.dirname(__file__)
if os.name == "nt":
# Register the main torchvision library location on the default DLL path
import ctypes
kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
prev_error_mode = kernel32.SetErrorMode(0x0001)
if with_load_library_flags:
kernel32.AddDllDirectory.restype = ctypes.c_void_p
os.add_dll_directory(lib_dir)
kernel32.SetErrorMode(prev_error_mode)
loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
ext_specs = extfinder.find_spec(lib_name)
if ext_specs is None:
raise ImportError
return ext_specs.origin
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
doc: InnerDoc
docs = [
MyDoc(tensor=np.zeros(10), name='hello', doc=InnerDoc(price=i))
for i in range(4)
]
storage = DocVec[MyDoc](docs)._storage
assert (storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
for name in storage.any_columns['name']:
assert name == 'hello'
inner_docs = storage.doc_columns['doc']
assert isinstance(inner_docs, DocVec[InnerDoc])
for i, doc in enumerate(inner_docs):
assert doc.price == i
def test_column_storage_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert view['id'] == '0'
assert (view['tensor'] == np.zeros(10)).all()
assert view['name'] == 'hello'
view['id'] = 1
view['tensor'] = np.ones(10)
view['name'] = 'byebye'
assert storage.any_columns['id'][0] == 1
assert (storage.tensor_columns['tensor'][0] == np.ones(10)).all()
assert storage.any_columns['name'][0] == 'byebye'
def test_storage_view_dict_like():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert list(view.keys()) == ['id', 'name', 'tensor']
# since boolean value of np array is ambiguous, we iterate manually
for val_view, val_reference in zip(view.values(), ['0', 'hello', np.zeros(10)]):
if isinstance(val_view, np.ndarray):
assert (val_view == val_reference).all()
else:
assert val_view == val_reference
for item_view, item_reference in zip(
view.items(), [('id', '0'), ('name', 'hello'), ('tensor', np.zeros(10))]
):
if isinstance(item_view[1], np.ndarray):
assert item_view[0] == item_reference[0]
assert (item_view[1] == item_reference[1]).all()
else:
assert item_view == item_reference
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_column_storage_init():
class InnerDoc(BaseDoc):
price: int
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
doc: InnerDoc
docs = [
MyDoc(tensor=np.zeros(10), name='hello', doc=InnerDoc(price=i))
for i in range(4)
]
storage = DocVec[MyDoc](docs)._storage
assert (storage.tensor_columns['tensor'] == np.zeros((4, 10))).all()
for name in storage.any_columns['name']:
assert name == 'hello'
inner_docs = storage.doc_columns['doc']
assert isinstance(inner_docs, DocVec[InnerDoc])
for i, doc in enumerate(inner_docs):
assert doc.price == i
def test_column_storage_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
view = ColumnStorageView(0, storage)
assert view['id'] == '0'
assert (view['tensor'] == np.zeros(10)).all()
assert view['name'] == 'hello'
view['id'] = 1
view['tensor'] = np.ones(10)
view['name'] = 'byebye'
assert storage.any_columns['id'][0] == 1
assert (storage.tensor_columns['tensor'][0] == np.ones(10)).all()
assert storage.any_columns['name'][0] == 'byebye'
|
import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytest.mark.parametrize(
"bundle,task,channel,expected_score",
[
[CONVTASNET_BASE_LIBRI2MIX, "speech_separation", 1, 8.1373],
[HDEMUCS_HIGH_MUSDB_PLUS, "music_separation", 2, 8.7480],
],
)
def test_source_separation_models(bundle, task, channel, expected_score, mixture_source, clean_sources):
"""Integration test for the source separation pipeline.
Given the mixture waveform with dimensions `(batch, channel, time)`, the pre-trained pipeline generates
the separated sources Tensor with dimensions `(batch, num_sources, time)`.
    The test computes the scale-invariant signal-to-distortion ratio (Si-SDR) score in decibels (dB).
    The Si-SDR score should be equal to or larger than the expected score.
"""
model = bundle.get_model()
mixture_waveform, sample_rate = torchaudio.load(mixture_source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms = []
for source in clean_sources:
clean_waveform, sample_rate = torchaudio.load(source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms.append(clean_waveform)
mixture_waveform = mixture_waveform.reshape(1, channel, -1)
estimated_sources = model(mixture_waveform)
clean_waveforms = torch.cat(clean_waveforms).unsqueeze(0)
estimated_sources = estimated_sources.reshape(1, -1, clean_waveforms.shape[-1])
sdr_values = sdr(estimated_sources, clean_waveforms).mean()
assert sdr_values >= expected_score
|
import os
import sys
import torch
import torchaudio
from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import PIT, sdr
def test_source_separation_models(mixture_source, clean_sources):
"""Integration test for the source separation pipeline.
Given the mixture waveform with dimensions `(batch, 1, time)`, the pre-trained pipeline generates
the separated sources Tensor with dimensions `(batch, num_sources, time)`.
The test computes the scale-invariant signal-to-distortion ratio (Si-SDR) score in decibel (dB) with
permutation invariant training (PIT) criterion. PIT computes Si-SDR scores between the estimated sources and the
    target sources for all permutations, then returns the highest values as the final output. The final
Si-SDR score should be equal to or larger than the expected score.
"""
BUNDLE = CONVTASNET_BASE_LIBRI2MIX
EXPECTED_SCORE = 8.1373 # expected Si-SDR score.
model = BUNDLE.get_model()
mixture_waveform, sample_rate = torchaudio.load(mixture_source)
assert sample_rate == BUNDLE.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms = []
for source in clean_sources:
clean_waveform, sample_rate = torchaudio.load(source)
assert sample_rate == BUNDLE.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms.append(clean_waveform)
mixture_waveform = mixture_waveform.reshape(1, 1, -1)
estimated_sources = model(mixture_waveform)
clean_waveforms = torch.cat(clean_waveforms).unsqueeze(0)
_sdr_pit = PIT(utility_func=sdr)
sdr_values = _sdr_pit(estimated_sources, clean_waveforms)
assert sdr_values >= EXPECTED_SCORE
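

# For reference, a minimal sketch of the PIT criterion described in the
# docstring above (assumed behavior; the real implementation is
# ``source_separation.utils.metrics.PIT``): evaluate the utility for every
# permutation of the estimated sources and keep the best mean score.
def _pit_sketch(utility_func, estimates: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """Best mean utility over all source permutations.

    Both tensors have shape ``(batch, num_sources, time)``.
    """
    from itertools import permutations

    num_sources = targets.shape[1]
    scores = [
        utility_func(estimates[:, list(perm), :], targets).mean()
        for perm in permutations(range(num_sources))
    ]
    return torch.stack(scores).max()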
|
_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
"""Custom query engine."""
from abc import abstractmethod
from typing import Union
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.schema import QueryBundle, QueryType
STR_OR_RESPONSE_TYPE = Union[RESPONSE_TYPE, str]
class CustomQueryEngine(BaseModel, BaseQueryEngine):
"""
Custom query engine.
Subclasses can define additional attributes as Pydantic fields.
Subclasses must implement the `custom_query` method, which takes a query string
and returns either a Response object or a string as output.
They can optionally implement the `acustom_query` method for async support.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
callback_manager: CallbackManager = Field(
default_factory=lambda: CallbackManager([]), exclude=True
)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def query(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE:
with self.callback_manager.as_trace("query"):
# if query bundle, just run the query
if isinstance(str_or_query_bundle, QueryBundle):
query_str = str_or_query_bundle.query_str
else:
query_str = str_or_query_bundle
raw_response = self.custom_query(query_str)
return (
Response(raw_response)
if isinstance(raw_response, str)
else raw_response
)
async def aquery(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE:
with self.callback_manager.as_trace("query"):
if isinstance(str_or_query_bundle, QueryBundle):
query_str = str_or_query_bundle.query_str
else:
query_str = str_or_query_bundle
raw_response = await self.acustom_query(query_str)
return (
Response(raw_response)
if isinstance(raw_response, str)
else raw_response
)
@abstractmethod
def custom_query(self, query_str: str) -> STR_OR_RESPONSE_TYPE:
"""Run a custom query."""
async def acustom_query(self, query_str: str) -> STR_OR_RESPONSE_TYPE:
"""Run a custom query asynchronously."""
# by default, just run the synchronous version
return self.custom_query(query_str)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
raise NotImplementedError("This query engine does not support _query.")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
raise NotImplementedError("This query engine does not support _aquery.")
|
"""Custom query engine."""
from abc import abstractmethod
from typing import Union
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
from llama_index.core.bridge.pydantic import BaseModel, Field, ConfigDict
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.prompts.mixin import PromptMixinType
from llama_index.core.schema import QueryBundle, QueryType
STR_OR_RESPONSE_TYPE = Union[RESPONSE_TYPE, str]
class CustomQueryEngine(BaseModel, BaseQueryEngine):
"""Custom query engine.
Subclasses can define additional attributes as Pydantic fields.
Subclasses must implement the `custom_query` method, which takes a query string
and returns either a Response object or a string as output.
They can optionally implement the `acustom_query` method for async support.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
callback_manager: CallbackManager = Field(
default_factory=lambda: CallbackManager([]), exclude=True
)
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def query(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE:
with self.callback_manager.as_trace("query"):
# if query bundle, just run the query
if isinstance(str_or_query_bundle, QueryBundle):
query_str = str_or_query_bundle.query_str
else:
query_str = str_or_query_bundle
raw_response = self.custom_query(query_str)
return (
Response(raw_response)
if isinstance(raw_response, str)
else raw_response
)
async def aquery(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE:
with self.callback_manager.as_trace("query"):
if isinstance(str_or_query_bundle, QueryBundle):
query_str = str_or_query_bundle.query_str
else:
query_str = str_or_query_bundle
raw_response = await self.acustom_query(query_str)
return (
Response(raw_response)
if isinstance(raw_response, str)
else raw_response
)
@abstractmethod
def custom_query(self, query_str: str) -> STR_OR_RESPONSE_TYPE:
"""Run a custom query."""
async def acustom_query(self, query_str: str) -> STR_OR_RESPONSE_TYPE:
"""Run a custom query asynchronously."""
# by default, just run the synchronous version
return self.custom_query(query_str)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
raise NotImplementedError("This query engine does not support _query.")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
raise NotImplementedError("This query engine does not support _aquery.")
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Union
from mmengine.optim import _ParamScheduler
from mmengine.registry import HOOKS
from mmengine.utils import is_list_of
from .hook import Hook
DATA_BATCH = Optional[Union[dict, tuple, list]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each training iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (dict or tuple or list, optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here.
"""
if runner.param_schedulers is None:
return
def step(param_schedulers):
assert isinstance(param_schedulers, list)
for scheduler in param_schedulers:
if not scheduler.by_epoch:
scheduler.step()
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each training epoch.
Args:
runner (Runner): The runner of the training process.
"""
if runner.param_schedulers is None:
return
def step(param_schedulers):
assert isinstance(param_schedulers, list)
for scheduler in param_schedulers:
if scheduler.by_epoch:
scheduler.step()
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""Call step function for each scheduler which has attribute
``need_val_args`` after each validation epoch.
Args:
runner (Runner): The runner of the validation process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on validation dataset. The keys are the names of the
metrics, and the values are corresponding results.
Note:
            if ``runner.param_schedulers`` has not been built yet,
the hook ``after_val_epoch`` will be skipped.
"""
if runner.param_schedulers is None:
return
# avoid counting scheduler._global_step
        # it has already been counted in the after_train_* hooks
if metrics is None:
return
def step(param_schedulers):
# check param_schedulers is list and built
if not is_list_of(param_schedulers, _ParamScheduler):
return
for scheduler in param_schedulers:
if (scheduler.by_epoch
and getattr(scheduler, 'need_val_args', False)):
scheduler.step(metrics)
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
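

# A minimal config sketch (hedged): in a typical MMEngine setup this hook is
# enabled through ``default_hooks``, after which schedulers step automatically
# at the granularity set by their ``by_epoch`` flag:
#
#     default_hooks = dict(param_scheduler=dict(type='ParamSchedulerHook'))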
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Union[dict, tuple, list]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (dict or tuple or list, optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here.
"""
def step(param_schedulers):
assert isinstance(param_schedulers, list)
for scheduler in param_schedulers:
if not scheduler.by_epoch:
scheduler.step()
if runner.param_schedulers is None:
return
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
def step(param_schedulers):
assert isinstance(param_schedulers, list)
for scheduler in param_schedulers:
if scheduler.by_epoch:
scheduler.step()
if runner.param_schedulers is None:
return
if isinstance(runner.param_schedulers, list):
step(runner.param_schedulers)
elif isinstance(runner.param_schedulers, dict):
for param_schedulers in runner.param_schedulers.values():
step(param_schedulers)
else:
raise TypeError(
'runner.param_schedulers should be list of ParamScheduler or '
'a dict containing list of ParamScheduler, '
f'but got {runner.param_schedulers}')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from torchaudio_unittest.models.emformer.emformer_test_impl import EmformerTestImpl
@skipIfNoCuda
class EmformerFloat32GPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class EmformerFloat64GPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from torchaudio_unittest.models.emformer.emformer_test_impl import EmformerTestImpl
@skipIfNoCuda
class EmformerFloat32GPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class EmformerFloat64GPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
"""Unit tests for verifying event dispatching.
Much of this code is indirectly tested already through many end-to-end tests
that generate traces based on the callbacks. The traces are all verified
via snapshot testing (e.g., see unit tests for runnables).
"""
import contextvars
from contextlib import asynccontextmanager
from typing import Any, Optional
from uuid import UUID
from langchain_core.callbacks import (
AsyncCallbackHandler,
AsyncCallbackManager,
BaseCallbackHandler,
)
async def test_inline_handlers_share_parent_context() -> None:
"""Verify that handlers that are configured to run_inline can update parent context.
This test was created because some of the inline handlers were getting
their own context as the handling logic was kicked off using asyncio.gather
which does not automatically propagate the parent context (by design).
This issue was affecting only a few specific handlers:
* on_llm_start
* on_chat_model_start
    which in some cases were triggered with multiple prompts and, as a result,
    launched multiple tasks in parallel.
"""
some_var: contextvars.ContextVar[str] = contextvars.ContextVar("some_var")
class CustomHandler(AsyncCallbackHandler):
"""A handler that sets the context variable.
The handler sets the context variable to the name of the callback that was
called.
"""
def __init__(self, *, run_inline: bool) -> None:
"""Initialize the handler."""
self.run_inline = run_inline
async def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
"""Update the callstack with the name of the callback."""
some_var.set("on_llm_start")
# The manager serves as a callback dispatcher.
# It's responsible for dispatching callbacks to all registered handlers.
manager = AsyncCallbackManager(handlers=[CustomHandler(run_inline=True)])
# Check on_llm_start
some_var.set("unset")
await manager.on_llm_start({}, ["prompt 1"])
assert some_var.get() == "on_llm_start"
# Check what happens when run_inline is False
# We don't expect the context to be updated
manager2 = AsyncCallbackManager(
handlers=[
CustomHandler(run_inline=False),
]
)
some_var.set("unset")
await manager2.on_llm_start({}, ["prompt 1"])
# Will not be updated because the handler is not inline
assert some_var.get() == "unset"
async def test_inline_handlers_share_parent_context_multiple() -> None:
"""A slightly more complex variation of the test unit test above.
This unit test verifies that things work correctly when there are multiple prompts,
and multiple handlers that are configured to run inline.
"""
counter_var = contextvars.ContextVar("counter", default=0)
shared_stack = []
@asynccontextmanager
async def set_counter_var() -> Any:
token = counter_var.set(0)
try:
yield
finally:
counter_var.reset(token)
class StatefulAsyncCallbackHandler(AsyncCallbackHandler):
def __init__(self, name: str, *, run_inline: bool = True):
self.name = name
self.run_inline = run_inline
async def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
if self.name == "StateModifier":
current_counter = counter_var.get()
counter_var.set(current_counter + 1)
state = counter_var.get()
elif self.name == "StateReader":
state = counter_var.get()
else:
state = None
shared_stack.append(state)
await super().on_llm_start(
serialized,
prompts,
run_id=run_id,
parent_run_id=parent_run_id,
**kwargs,
)
handlers: list[BaseCallbackHandler] = [
StatefulAsyncCallbackHandler("StateModifier", run_inline=True),
StatefulAsyncCallbackHandler("StateReader", run_inline=True),
StatefulAsyncCallbackHandler("NonInlineHandler", run_inline=False),
]
prompts = ["Prompt1", "Prompt2", "Prompt3"]
async with set_counter_var():
shared_stack.clear()
manager = AsyncCallbackManager(handlers=handlers)
await manager.on_llm_start({}, prompts)
# Assert the order of states
states = [entry for entry in shared_stack if entry is not None]
assert states == [
1,
1,
2,
2,
3,
3,
], f"Expected order of states was broken due to context loss. Got {states}"
|
"""Unit tests for verifying event dispatching.
Much of this code is indirectly tested already through many end-to-end tests
that generate traces based on the callbacks. The traces are all verified
via snapshot testing (e.g., see unit tests for runnables).
"""
import contextvars
from contextlib import asynccontextmanager
from typing import Any, Optional
from uuid import UUID
from langchain_core.callbacks import (
AsyncCallbackHandler,
AsyncCallbackManager,
BaseCallbackHandler,
)
async def test_inline_handlers_share_parent_context() -> None:
"""Verify that handlers that are configured to run_inline can update parent context.
This test was created because some of the inline handlers were getting
their own context as the handling logic was kicked off using asyncio.gather
which does not automatically propagate the parent context (by design).
This issue was affecting only a few specific handlers:
* on_llm_start
* on_chat_model_start
    which in some cases were triggered with multiple prompts and, as a result,
    launched multiple tasks in parallel.
"""
some_var: contextvars.ContextVar[str] = contextvars.ContextVar("some_var")
class CustomHandler(AsyncCallbackHandler):
"""A handler that sets the context variable.
The handler sets the context variable to the name of the callback that was
called.
"""
def __init__(self, run_inline: bool) -> None:
"""Initialize the handler."""
self.run_inline = run_inline
async def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
"""Update the callstack with the name of the callback."""
some_var.set("on_llm_start")
# The manager serves as a callback dispatcher.
# It's responsible for dispatching callbacks to all registered handlers.
manager = AsyncCallbackManager(handlers=[CustomHandler(run_inline=True)])
# Check on_llm_start
some_var.set("unset")
await manager.on_llm_start({}, ["prompt 1"])
assert some_var.get() == "on_llm_start"
# Check what happens when run_inline is False
# We don't expect the context to be updated
manager2 = AsyncCallbackManager(
handlers=[
CustomHandler(run_inline=False),
]
)
some_var.set("unset")
await manager2.on_llm_start({}, ["prompt 1"])
# Will not be updated because the handler is not inline
assert some_var.get() == "unset"
async def test_inline_handlers_share_parent_context_multiple() -> None:
"""A slightly more complex variation of the test unit test above.
This unit test verifies that things work correctly when there are multiple prompts,
and multiple handlers that are configured to run inline.
"""
counter_var = contextvars.ContextVar("counter", default=0)
shared_stack = []
@asynccontextmanager
async def set_counter_var() -> Any:
token = counter_var.set(0)
try:
yield
finally:
counter_var.reset(token)
class StatefulAsyncCallbackHandler(AsyncCallbackHandler):
def __init__(self, name: str, run_inline: bool = True):
self.name = name
self.run_inline = run_inline
async def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
if self.name == "StateModifier":
current_counter = counter_var.get()
counter_var.set(current_counter + 1)
state = counter_var.get()
elif self.name == "StateReader":
state = counter_var.get()
else:
state = None
shared_stack.append(state)
await super().on_llm_start(
serialized,
prompts,
run_id=run_id,
parent_run_id=parent_run_id,
**kwargs,
)
handlers: list[BaseCallbackHandler] = [
StatefulAsyncCallbackHandler("StateModifier", run_inline=True),
StatefulAsyncCallbackHandler("StateReader", run_inline=True),
StatefulAsyncCallbackHandler("NonInlineHandler", run_inline=False),
]
prompts = ["Prompt1", "Prompt2", "Prompt3"]
async with set_counter_var():
shared_stack.clear()
manager = AsyncCallbackManager(handlers=handlers)
await manager.on_llm_start({}, prompts)
# Assert the order of states
states = [entry for entry in shared_stack if entry is not None]
assert states == [
1,
1,
2,
2,
3,
3,
], f"Expected order of states was broken due to context loss. Got {states}"
|
# Owner(s): ["module: inductor"]
import sys
import unittest
from torch.testing._internal.common_utils import IS_CI, IS_WINDOWS, skipIfXpu
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU, requires_gpu
if IS_WINDOWS and IS_CI:
sys.stderr.write(
"Windows CI does not have necessary dependencies for test_memory_planning yet\n"
)
if __name__ == "__main__":
sys.exit(0)
raise unittest.SkipTest("requires sympy/functorch/filelock") # noqa: F821
import torch
from torch._C import FileCheck
from torch._dynamo.utils import same
from torch._inductor import config
from torch._inductor.test_case import run_tests, TestCase
from torch._inductor.utils import run_and_get_cpp_code
from torch.export import Dim
@requires_gpu()
@config.patch(memory_planning=True)
class TestMemoryPlanning(TestCase):
device = GPU_TYPE
def _generate(self, *, device):
"""
Generate a simple test case that has multiple simultaneously-live intermediate tensors.
"""
class Foo(torch.nn.Module):
def forward(self, x, y, z):
t0 = x.matmul(y)
t1 = x.matmul(z)
t0 = x.transpose(0, 1).matmul(t1)
t1 = x.matmul(t0)
return t0.sum() + t1.sum()
x = torch.randn((3, 2), device=device)
y = torch.randn((2, 4), device=device)
z = torch.randn((2, 3), device=device)
return (Foo(), (x, y, z))
def test_python_wrapper(self):
f, args = self._generate(device=GPU_TYPE)
compiled = torch.compile(f, dynamic=True)
result, code = run_and_get_cpp_code(compiled, *args)
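        # With memory_planning=True, Inductor is expected to fold the
        # simultaneously-live intermediates into a single pooled allocation and
        # carve the individual buffers out of it via ``alloc_from_pool``; the
        # FileCheck patterns below assert exactly that.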
FileCheck().check(
"pool1 = empty_strided_"
+ GPU_TYPE
+ "((4*s27*s77 + align(4*s77*s77), ), (1, )"
).check_next(
"buf0 = alloc_from_pool(pool1, 0, torch.float32, (s77, s77), (s77, 1))"
).check("buf1 = alloc_from_pool(pool1, align(4*s77*s77),").run(code)
self.assertTrue(same(f(*args), result))
def test_cpp_wrapper(self):
f, args = self._generate(device=GPU_TYPE)
compiled = torch.compile(f, dynamic=True)
with config.patch({"cpp_wrapper": True}):
result, code = run_and_get_cpp_code(compiled, *args)
FileCheck().check(
"aoti_torch__alloc_from_pool(pool1, 0, cached_torch_dtype_float32, 2, int_array_2, int_array_3, &tmp_tensor_handle_0)"
).check_next("auto buf0 = RAIIAtenTensorHandle(tmp_tensor_handle_0);").check(
"auto buf1 = RAIIAtenTensorHandle(tmp_tensor_handle_1);"
).run(code)
self.assertTrue(same(f(*args), result))
@skipIfXpu(msg="aoti doesn't work on XPU")
def test_aoti(self):
try:
from .test_aot_inductor import AOTIRunnerUtil
except ImportError:
from test_aot_inductor import ( # @manual=fbcode//caffe2/test/inductor:test_aot_inductor-library
AOTIRunnerUtil,
)
f, args = self._generate(device=GPU_TYPE)
dim0_x = Dim("dim0_x", min=1, max=2048)
dynamic_shapes = ({0: dim0_x}, None, None)
result, code = run_and_get_cpp_code(
lambda: AOTIRunnerUtil.run(f, args, dynamic_shapes=dynamic_shapes)
)
FileCheck().check(
"int64_t int_array_0[] = {24L + align(12L*s77), };"
).check_next("int64_t int_array_1[] = {1L, };").check_next(
"AtenTensorHandle pool1_handle;"
).check_next(
"aoti_torch_empty_strided(1, int_array_0, int_array_1,"
).check_next("RAIIAtenTensorHandle pool1(pool1_handle);").check_next(
"int64_t int_array_2[] = {s77, 3L};"
).check_next("int64_t int_array_3[] = {3L, 1L};").check_next(
"AtenTensorHandle tmp_tensor_handle_0;"
).check_next("aoti_torch__alloc_from_pool(pool1, 0").run(code)
self.assertTrue(same(f(*args), result))
if __name__ == "__main__":
if HAS_GPU:
run_tests()
|
# Owner(s): ["module: inductor"]
import sys
import unittest
from torch.testing._internal.common_utils import IS_CI, IS_WINDOWS, skipIfXpu
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU, requires_gpu
if IS_WINDOWS and IS_CI:
sys.stderr.write(
"Windows CI does not have necessary dependencies for test_memory_planning yet\n"
)
if __name__ == "__main__":
sys.exit(0)
raise unittest.SkipTest("requires sympy/functorch/filelock") # noqa: F821
import torch
from torch._C import FileCheck
from torch._dynamo.utils import same
from torch._inductor import config
from torch._inductor.test_case import run_tests, TestCase
from torch._inductor.utils import run_and_get_cpp_code
from torch.export import Dim
@requires_gpu()
@config.patch(memory_planning=True)
class TestMemoryPlanning(TestCase):
device = GPU_TYPE
def _generate(self, *, device):
"""
Generate a simple test case that has multiple simultaneously-live intermediate tensors.
"""
class Foo(torch.nn.Module):
def forward(self, x, y, z):
t0 = x.matmul(y)
t1 = x.matmul(z)
t0 = x.transpose(0, 1).matmul(t1)
t1 = x.matmul(t0)
return t0.sum() + t1.sum()
x = torch.randn((3, 2), device=device)
y = torch.randn((2, 4), device=device)
z = torch.randn((2, 3), device=device)
return (Foo(), (x, y, z))
def test_python_wrapper(self):
f, args = self._generate(device=GPU_TYPE)
compiled = torch.compile(f, dynamic=True)
result, code = run_and_get_cpp_code(compiled, *args)
FileCheck().check(
"pool1 = empty_strided_"
+ GPU_TYPE
+ "((4*s27*s77 + align(4*s77*s77), ), (1, )"
).check_next(
"buf0 = alloc_from_pool(pool1, 0, torch.float32, (s77, s77), (s77, 1))"
).check(
"buf1 = alloc_from_pool(pool1, align(4*s77*s77),"
).run(
code
)
self.assertTrue(same(f(*args), result))
def test_cpp_wrapper(self):
f, args = self._generate(device=GPU_TYPE)
compiled = torch.compile(f, dynamic=True)
with config.patch({"cpp_wrapper": True}):
result, code = run_and_get_cpp_code(compiled, *args)
FileCheck().check(
"aoti_torch__alloc_from_pool(pool1, 0, cached_torch_dtype_float32, 2, int_array_2, int_array_3, &tmp_tensor_handle_0)"
).check_next("auto buf0 = RAIIAtenTensorHandle(tmp_tensor_handle_0);").check(
"auto buf1 = RAIIAtenTensorHandle(tmp_tensor_handle_1);"
).run(
code
)
self.assertTrue(same(f(*args), result))
@skipIfXpu(msg="aoti doesn't work on XPU")
def test_aoti(self):
try:
from .test_aot_inductor import AOTIRunnerUtil
except ImportError:
from test_aot_inductor import ( # @manual=fbcode//caffe2/test/inductor:test_aot_inductor-library
AOTIRunnerUtil,
)
f, args = self._generate(device=GPU_TYPE)
dim0_x = Dim("dim0_x", min=1, max=2048)
dynamic_shapes = ({0: dim0_x}, None, None)
result, code = run_and_get_cpp_code(
lambda: AOTIRunnerUtil.run(f, args, dynamic_shapes=dynamic_shapes)
)
FileCheck().check(
"int64_t int_array_0[] = {24L + align(12L*s77), };"
).check_next("int64_t int_array_1[] = {1L, };").check_next(
"AtenTensorHandle pool1_handle;"
).check_next(
"aoti_torch_empty_strided(1, int_array_0, int_array_1,"
).check_next(
"RAIIAtenTensorHandle pool1(pool1_handle);"
).check_next(
"int64_t int_array_2[] = {s77, 3L};"
).check_next(
"int64_t int_array_3[] = {3L, 1L};"
).check_next(
"AtenTensorHandle tmp_tensor_handle_0;"
).check_next(
"aoti_torch__alloc_from_pool(pool1, 0"
).run(
code
)
self.assertTrue(same(f(*args), result))
if __name__ == "__main__":
if HAS_GPU:
run_tests()
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[320, 96, 32],
out_channels=[96, 96, 96]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[96, 96, 96],
out_channels=[96, 96, 96],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(
type='Resize',
img_scale=[(320, 320), (416, 416)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
type='RepeatDataset', # use RepeatDataset to speed up training
times=10,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=4000,
warmup_ratio=0.0001,
step=[24, 28])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=30)
evaluation = dict(interval=1, metric=['bbox'])
find_unused_parameters = True
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (24 samples per GPU)
auto_scale_lr = dict(base_batch_size=192)
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='MobileNetV2',
out_indices=(2, 4, 6),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[320, 96, 32],
out_channels=[96, 96, 96]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[96, 96, 96],
out_channels=[96, 96, 96],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(
type='Resize',
img_scale=[(320, 320), (416, 416)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=24,
workers_per_gpu=4,
train=dict(
type='RepeatDataset', # use RepeatDataset to speed up training
times=10,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=4000,
warmup_ratio=0.0001,
step=[24, 28])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=30)
evaluation = dict(interval=1, metric=['bbox'])
find_unused_parameters = True
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
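    # PEP 562 module-level ``__getattr__``: exported names are resolved lazily
    # on first attribute access and cached in ``globals()`` so that later
    # lookups bypass this hook entirely.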
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return __all__
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return __all__
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`TranslationEvaluator` but is specifically designed for sparse encoder models.
Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3...) and (fr_1, fr_2, fr_3, ...),
and assuming that fr_i is the translation of en_i.
    Checks if vec(en_i) has the highest similarity to vec(fr_i). Computes the accuracy in both directions.
The labels need to indicate the similarity between the sentences.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseTranslationEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
            # Load a model (not multilingual, but we hope to see some on the hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends TranslationEvaluator but is specifically designed for sparse encoder models.
Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3...) and (fr_1, fr_2, fr_3, ...),
and assuming that fr_i is the translation of en_i.
    Checks if vec(en_i) has the highest similarity to vec(fr_i). Computes the accuracy in both directions.
The labels need to indicate the similarity between the sentences.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseTranslationEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
            # Load a model (not multilingual, but we hope to see some on the hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
"""Module containing the base parser for arguments of Jina."""
import argparse
from jina.parsers.helper import _chf
def set_base_parser():
"""Set the base parser
:return: the parser
"""
from jina import __version__
from jina.helper import colored, format_full_version_info, get_full_version
# create the top-level parser
urls = {
'Code': ('💻', 'https://oss.jina.ai'),
'Docs': ('📖', 'https://docs.jina.ai'),
'Help': ('💬', 'https://discord.jina.ai'),
'Hiring!': ('🙌', 'https://jobs.jina.ai'),
}
url_str = '\n'.join(
f'- {v[0]:<10} {k:10.10}\t{colored(v[1], "cyan", attrs=["underline"])}'
for k, v in urls.items()
)
parser = argparse.ArgumentParser(
epilog=f'''
Jina v{colored(__version__, "green")}: Build multimodal AI services via cloud native technologies.
{url_str}
''',
formatter_class=_chf,
)
parser.add_argument(
'-v',
'--version',
action='version',
version=__version__,
help='Show Jina version',
)
parser.add_argument(
'-vf',
'--version-full',
action='version',
version=format_full_version_info(*get_full_version()),
help='Show Jina and all dependencies\' versions',
)
return parser
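

# A minimal usage sketch (hedged; only the flags registered above are shown).
# ``action='version'`` prints and exits, so no further handling is needed:
#
#     parser = set_base_parser()
#     parser.parse_args(['-v'])  # prints the Jina version and raises SystemExit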
|
"""Module containing the base parser for arguments of Jina."""
import argparse
from jina.parsers.helper import _chf
def set_base_parser():
"""Set the base parser
:return: the parser
"""
from jina import __version__
from jina.helper import colored, format_full_version_info, get_full_version
# create the top-level parser
urls = {
'Code': ('💻', 'https://oss.jina.ai'),
'Docs': ('📖', 'https://docs.jina.ai'),
'Help': ('💬', 'https://discord.jina.ai'),
'Hiring!': ('🙌', 'https://jobs.jina.ai'),
}
url_str = '\n'.join(
f'- {v[0]:<10} {k:10.10}\t{colored(v[1], "cyan", attrs=["underline"])}'
for k, v in urls.items()
)
parser = argparse.ArgumentParser(
epilog=f'''
Jina v{colored(__version__, "green")}: Build multimodal AI services via cloud native technologies.
{url_str}
''',
formatter_class=_chf,
)
parser.add_argument(
'-v',
'--version',
action='version',
version=__version__,
help='Show Jina version',
)
parser.add_argument(
'-vf',
'--version-full',
action='version',
version=format_full_version_info(*get_full_version()),
help='Show Jina and all dependencies\' versions',
)
return parser
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import unittest
from unittest.mock import MagicMock, patch
import pytest
from mmdet.datasets import DATASETS
@patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock)
@pytest.mark.parametrize('dataset',
['CocoDataset', 'VOCDataset', 'CityscapesDataset'])
def test_custom_classes_override_default(dataset):
dataset_class = DATASETS.get(dataset)
if dataset in ['CocoDataset', 'CityscapesDataset']:
dataset_class.coco = MagicMock()
dataset_class.cat_ids = MagicMock()
original_classes = dataset_class.CLASSES
# Test setting classes as a tuple
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=('bus', 'car'),
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ('bus', 'car')
print(custom_dataset)
# Test setting classes as a list
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['bus', 'car'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
# Test overriding not a subset
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['foo'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['foo']
print(custom_dataset)
# Test default behavior
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=None,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES == original_classes
print(custom_dataset)
# Test sending file path
import tempfile
tmp_file = tempfile.NamedTemporaryFile()
with open(tmp_file.name, 'w') as f:
f.write('bus\ncar\n')
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=tmp_file.name,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
tmp_file.close()
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
class CustomDatasetTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.data_dir = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'data')
self.dataset_class = DATASETS.get('XMLDataset')
def test_data_infos__default_db_directories(self):
"""Test correct data read having a Pacal-VOC directory structure."""
test_dataset_root = os.path.join(self.data_dir, 'VOCdevkit', 'VOC2007')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=os.path.join(test_dataset_root, 'ImageSets', 'Main',
'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True)
self.assertListEqual([{
'id': '000001',
'filename': 'JPEGImages/000001.jpg',
'width': 353,
'height': 500
}], custom_ds.data_infos)
def test_data_infos__overridden_db_subdirectories(self):
"""Test correct data read having a customized directory structure."""
test_dataset_root = os.path.join(self.data_dir, 'custom_dataset')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=os.path.join(test_dataset_root, 'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True,
img_prefix='',
img_subdir='images',
ann_subdir='images')
self.assertListEqual([{
'id': '000001',
'filename': 'images/000001.jpg',
'width': 353,
'height': 500
}], custom_ds.data_infos)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import MagicMock, patch
import pytest
from mmdet.datasets import DATASETS
@patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock)
@pytest.mark.parametrize('dataset',
['CocoDataset', 'VOCDataset', 'CityscapesDataset'])
def test_custom_classes_override_default(dataset):
dataset_class = DATASETS.get(dataset)
if dataset in ['CocoDataset', 'CityscapesDataset']:
dataset_class.coco = MagicMock()
dataset_class.cat_ids = MagicMock()
original_classes = dataset_class.CLASSES
# Test setting classes as a tuple
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=('bus', 'car'),
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ('bus', 'car')
print(custom_dataset)
# Test setting classes as a list
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['bus', 'car'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
# Test overriding not a subset
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['foo'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['foo']
print(custom_dataset)
# Test default behavior
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=None,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES == original_classes
print(custom_dataset)
# Test sending file path
import tempfile
tmp_file = tempfile.NamedTemporaryFile()
with open(tmp_file.name, 'w') as f:
f.write('bus\ncar\n')
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=tmp_file.name,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
tmp_file.close()
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
|
import json
import logging
from typing import List
from langchain_core._api.deprecation import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_DBNAME = "chat_history"
DEFAULT_COLLECTION_NAME = "message_store"
@deprecated(
since="0.0.25",
removal="1.0",
alternative_import="langchain_mongodb.MongoDBChatMessageHistory",
)
class MongoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in MongoDB.
Args:
connection_string: connection string to connect to MongoDB
session_id: arbitrary key that is used to store the messages
of a single chat session.
database_name: name of the database to use
collection_name: name of the collection to use
create_index: whether to create an index with name SessionId. Set to False if
such an index already exists.
"""
def __init__(
self,
connection_string: str,
session_id: str,
database_name: str = DEFAULT_DBNAME,
collection_name: str = DEFAULT_COLLECTION_NAME,
create_index: bool = True,
):
from pymongo import MongoClient, errors
self.connection_string = connection_string
self.session_id = session_id
self.database_name = database_name
self.collection_name = collection_name
try:
self.client: MongoClient = MongoClient(connection_string)
except errors.ConnectionFailure as error:
logger.error(error)
self.db = self.client[database_name]
self.collection = self.db[collection_name]
if create_index:
self.collection.create_index("SessionId")
@property
def messages(self) -> List[BaseMessage]: # type: ignore[override]
"""Retrieve the messages from MongoDB"""
from pymongo import errors
        cursor = None
        try:
            cursor = self.collection.find({"SessionId": self.session_id})
        except errors.OperationFailure as error:
            logger.error(error)
if cursor:
items = [json.loads(document["History"]) for document in cursor]
else:
items = []
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in MongoDB"""
from pymongo import errors
try:
self.collection.insert_one(
{
"SessionId": self.session_id,
"History": json.dumps(message_to_dict(message)),
}
)
except errors.WriteError as err:
logger.error(err)
def clear(self) -> None:
"""Clear session memory from MongoDB"""
from pymongo import errors
try:
self.collection.delete_many({"SessionId": self.session_id})
except errors.WriteError as err:
logger.error(err)
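if __name__ == "__main__":
    # Hedged usage sketch (hypothetical connection string; assumes a MongoDB
    # instance is reachable at this URI):
    from langchain_core.messages import HumanMessage

    history = MongoDBChatMessageHistory(
        connection_string="mongodb://localhost:27017", session_id="demo-session"
    )
    history.add_message(HumanMessage(content="hello"))
    print(history.messages)
    history.clear()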
|
import json
import logging
from typing import List
from langchain_core._api.deprecation import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
logger = logging.getLogger(__name__)
DEFAULT_DBNAME = "chat_history"
DEFAULT_COLLECTION_NAME = "message_store"
@deprecated(
since="0.0.25",
removal="1.0",
alternative_import="langchain_mongodb.MongoDBChatMessageHistory",
)
class MongoDBChatMessageHistory(BaseChatMessageHistory):
"""Chat message history that stores history in MongoDB.
Args:
connection_string: connection string to connect to MongoDB
session_id: arbitrary key that is used to store the messages
of a single chat session.
database_name: name of the database to use
collection_name: name of the collection to use
create_index: whether to create an index with name SessionId. Set to False if
such an index already exists.
"""
def __init__(
self,
connection_string: str,
session_id: str,
database_name: str = DEFAULT_DBNAME,
collection_name: str = DEFAULT_COLLECTION_NAME,
create_index: bool = True,
):
from pymongo import MongoClient, errors
self.connection_string = connection_string
self.session_id = session_id
self.database_name = database_name
self.collection_name = collection_name
try:
self.client: MongoClient = MongoClient(connection_string)
except errors.ConnectionFailure as error:
logger.error(error)
self.db = self.client[database_name]
self.collection = self.db[collection_name]
if create_index:
self.collection.create_index("SessionId")
@property
def messages(self) -> List[BaseMessage]: # type: ignore
"""Retrieve the messages from MongoDB"""
from pymongo import errors
        cursor = None
        try:
            cursor = self.collection.find({"SessionId": self.session_id})
        except errors.OperationFailure as error:
            logger.error(error)
if cursor:
items = [json.loads(document["History"]) for document in cursor]
else:
items = []
messages = messages_from_dict(items)
return messages
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the record in MongoDB"""
from pymongo import errors
try:
self.collection.insert_one(
{
"SessionId": self.session_id,
"History": json.dumps(message_to_dict(message)),
}
)
except errors.WriteError as err:
logger.error(err)
def clear(self) -> None:
"""Clear session memory from MongoDB"""
from pymongo import errors
try:
self.collection.delete_many({"SessionId": self.session_id})
except errors.WriteError as err:
logger.error(err)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc1'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
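# Doctest-style sanity checks mirroring the docstring examples:
# >>> parse_version_info('1.3.0')
# (1, 3, 0)
# >>> parse_version_info('2.0.0rc1')
# (2, 0, 0, 'rc1')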
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_pure_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_pure_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = datapoints.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
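if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): crop a random
    # pure-tensor image to a fixed 64x64 canvas; inputs smaller than the
    # target would be padded instead. Pure tensors are accepted per
    # _check_inputs above.
    transform = FixedSizeCrop(size=(64, 64))
    image = torch.rand(3, 100, 80)
    out = transform(image)
    assert out.shape[-2:] == (64, 64)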
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _FillType, _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import get_bounding_boxes, has_any, is_simple_tensor, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = datapoints.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
                In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.param_schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.param_schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
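# Illustration (hedged, not part of the original file): with this hook
# registered, an epoch-based scheduler such as
#   param_scheduler = [dict(type='MultiStepLR', by_epoch=True, milestones=[8, 11])]
# is stepped once per epoch in `after_train_epoch`, while a scheduler with
# ``by_epoch=False`` is stepped every iteration in `after_train_iter`.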
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
                In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.param_schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.param_schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
|
from typing import Any, Dict
import torch
from torchvision.transforms.v2 import functional as F, Transform
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
.. v2betastatus:: UniformTemporalSubsample transform
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
    When ``num_samples`` is larger than the size of the temporal dimension of the video, it
will sample frames based on nearest neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (torch.Tensor,)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return F.uniform_temporal_subsample(inpt, self.num_samples)
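if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): subsample a
    # random 8-frame video down to 4 frames along the temporal dimension.
    video = torch.rand(8, 3, 32, 32)  # [..., T, C, H, W]
    out = UniformTemporalSubsample(num_samples=4)(video)
    assert out.shape[0] == 4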
|
from typing import Any, Dict
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
.. v2betastatus:: UniformTemporalSubsample transform
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
    When ``num_samples`` is larger than the size of the temporal dimension of the video, it
will sample frames based on nearest neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (torch.Tensor,)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints._VideoType, params: Dict[str, Any]) -> datapoints._VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
|
import asyncio
from typing import Any, Dict, Generator, List, Union
import pytest
from llama_index.core.schema import (
BaseNode,
IndexNode,
TextNode,
)
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
)
from llama_index.vector_stores.lantern import LanternVectorStore
# For testing: installation instructions are at https://github.com/lanterndata/lantern#-quick-install
PARAMS: Dict[str, Union[str, int]] = {
"host": "localhost",
"user": "postgres",
"password": "mark90",
"port": 5432,
}
TEST_DB = "test_vector_db"
TEST_TABLE_NAME = "lorem_ipsum"
TEST_SCHEMA_NAME = "test"
TEST_EMBED_DIM = 3
try:
import asyncpg # noqa
import pgvector # noqa
import psycopg2
import sqlalchemy
import sqlalchemy.ext.asyncio # noqa
# connection check
conn__ = psycopg2.connect(**PARAMS) # type: ignore
conn__.close()
postgres_not_available = False
except (ImportError, Exception):
postgres_not_available = True
def _get_sample_vector(num: float) -> List[float]:
"""
Get sample embedding vector of the form [num, 1, 1, ..., 1]
where the length of the vector is TEST_EMBED_DIM.
"""
return [num] + [1.0] * (TEST_EMBED_DIM - 1)
@pytest.fixture(scope="session")
def conn() -> Any:
import psycopg2
return psycopg2.connect(**PARAMS) # type: ignore
@pytest.fixture()
def db(conn: Any) -> Generator:
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {TEST_DB}")
c.execute(f"CREATE DATABASE {TEST_DB}")
conn.commit()
yield
with conn.cursor() as c:
c.execute(f"DROP DATABASE {TEST_DB}")
conn.commit()
@pytest.fixture()
def lantern_db(db: None) -> Any:
lantern_db = LanternVectorStore.from_params(
**PARAMS, # type: ignore
database=TEST_DB,
table_name=TEST_TABLE_NAME,
schema_name=TEST_SCHEMA_NAME,
embed_dim=TEST_EMBED_DIM,
)
yield lantern_db
asyncio.run(lantern_db.close())
@pytest.mark.skipif(postgres_not_available, reason="postgres db is not available")
@pytest.mark.asyncio
async def test_instance_creation(db: None) -> None:
lantern_db = LanternVectorStore.from_params(
**PARAMS, # type: ignore
database=TEST_DB,
table_name=TEST_TABLE_NAME,
schema_name=TEST_SCHEMA_NAME,
)
assert isinstance(lantern_db, LanternVectorStore)
assert not hasattr(lantern_db, "_engine")
assert lantern_db.client is None
await lantern_db.close()
@pytest.mark.skipif(postgres_not_available, reason="postgres db is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("use_async", [True, False])
async def test_add_to_db_and_query(
lantern_db: LanternVectorStore, node_embeddings: List[TextNode], use_async: bool
) -> None:
if use_async:
await lantern_db.async_add(node_embeddings)
else:
lantern_db.add(node_embeddings)
assert isinstance(lantern_db, LanternVectorStore)
assert hasattr(lantern_db, "_engine")
q = VectorStoreQuery(query_embedding=_get_sample_vector(1.0), similarity_top_k=1)
if use_async:
res = await lantern_db.aquery(q)
else:
res = lantern_db.query(q)
assert res.nodes
assert len(res.nodes) == 1
assert res.nodes[0].node_id == "aaa"
@pytest.mark.skipif(postgres_not_available, reason="postgres db is not available")
@pytest.mark.asyncio
@pytest.mark.parametrize("use_async", [True, False])
async def test_add_to_db_and_query_index_nodes(
lantern_db: LanternVectorStore,
index_node_embeddings: List[BaseNode],
use_async: bool,
) -> None:
if use_async:
await lantern_db.async_add(index_node_embeddings)
else:
lantern_db.add(index_node_embeddings)
assert isinstance(lantern_db, LanternVectorStore)
assert hasattr(lantern_db, "_engine")
q = VectorStoreQuery(query_embedding=_get_sample_vector(5.0), similarity_top_k=2)
if use_async:
res = await lantern_db.aquery(q)
else:
res = lantern_db.query(q)
assert res.nodes
assert len(res.nodes) == 2
assert res.nodes[0].node_id == "aaa_ref"
assert isinstance(res.nodes[0], IndexNode)
assert hasattr(res.nodes[0], "index_id")
assert res.nodes[1].node_id == "bbb"
assert isinstance(res.nodes[1], TextNode)
|
import asyncio
from typing import Any, Dict, Generator, List, Union
import pytest
from llama_index.core.schema import (
BaseNode,
IndexNode,
TextNode,
)
from llama_index.core.vector_stores.types import (
VectorStoreQuery,
)
from llama_index.vector_stores.lantern import LanternVectorStore
# For testing: installation instructions are at https://github.com/lanterndata/lantern#-quick-install
PARAMS: Dict[str, Union[str, int]] = {
"host": "localhost",
"user": "postgres",
"password": "mark90",
"port": 5432,
}
TEST_DB = "test_vector_db"
TEST_TABLE_NAME = "lorem_ipsum"
TEST_SCHEMA_NAME = "test"
TEST_EMBED_DIM = 3
try:
import asyncpg # noqa
import pgvector # noqa
import psycopg2
import sqlalchemy
import sqlalchemy.ext.asyncio # noqa
# connection check
conn__ = psycopg2.connect(**PARAMS) # type: ignore
conn__.close()
postgres_not_available = False
except (ImportError, Exception):
postgres_not_available = True
def _get_sample_vector(num: float) -> List[float]:
"""
Get sample embedding vector of the form [num, 1, 1, ..., 1]
where the length of the vector is TEST_EMBED_DIM.
"""
return [num] + [1.0] * (TEST_EMBED_DIM - 1)
@pytest.fixture(scope="session")
def conn() -> Any:
import psycopg2
return psycopg2.connect(**PARAMS) # type: ignore
@pytest.fixture()
def db(conn: Any) -> Generator:
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {TEST_DB}")
c.execute(f"CREATE DATABASE {TEST_DB}")
conn.commit()
yield
with conn.cursor() as c:
c.execute(f"DROP DATABASE {TEST_DB}")
conn.commit()
@pytest.fixture()
def lantern_db(db: None) -> Any:
lantern_db = LanternVectorStore.from_params(
**PARAMS, # type: ignore
database=TEST_DB,
table_name=TEST_TABLE_NAME,
schema_name=TEST_SCHEMA_NAME,
embed_dim=TEST_EMBED_DIM,
)
yield lantern_db
asyncio.run(lantern_db.close())
@pytest.mark.skipif(postgres_not_available, reason="postgres db is not available")
@pytest.mark.asyncio()
async def test_instance_creation(db: None) -> None:
lantern_db = LanternVectorStore.from_params(
**PARAMS, # type: ignore
database=TEST_DB,
table_name=TEST_TABLE_NAME,
schema_name=TEST_SCHEMA_NAME,
)
assert isinstance(lantern_db, LanternVectorStore)
assert not hasattr(lantern_db, "_engine")
assert lantern_db.client is None
await lantern_db.close()
@pytest.mark.skipif(postgres_not_available, reason="postgres db is not available")
@pytest.mark.asyncio()
@pytest.mark.parametrize("use_async", [True, False])
async def test_add_to_db_and_query(
lantern_db: LanternVectorStore, node_embeddings: List[TextNode], use_async: bool
) -> None:
if use_async:
await lantern_db.async_add(node_embeddings)
else:
lantern_db.add(node_embeddings)
assert isinstance(lantern_db, LanternVectorStore)
assert hasattr(lantern_db, "_engine")
q = VectorStoreQuery(query_embedding=_get_sample_vector(1.0), similarity_top_k=1)
if use_async:
res = await lantern_db.aquery(q)
else:
res = lantern_db.query(q)
assert res.nodes
assert len(res.nodes) == 1
assert res.nodes[0].node_id == "aaa"
@pytest.mark.skipif(postgres_not_available, reason="postgres db is not available")
@pytest.mark.asyncio()
@pytest.mark.parametrize("use_async", [True, False])
async def test_add_to_db_and_query_index_nodes(
lantern_db: LanternVectorStore,
index_node_embeddings: List[BaseNode],
use_async: bool,
) -> None:
if use_async:
await lantern_db.async_add(index_node_embeddings)
else:
lantern_db.add(index_node_embeddings)
assert isinstance(lantern_db, LanternVectorStore)
assert hasattr(lantern_db, "_engine")
q = VectorStoreQuery(query_embedding=_get_sample_vector(5.0), similarity_top_k=2)
if use_async:
res = await lantern_db.aquery(q)
else:
res = lantern_db.query(q)
assert res.nodes
assert len(res.nodes) == 2
assert res.nodes[0].node_id == "aaa_ref"
assert isinstance(res.nodes[0], IndexNode)
assert hasattr(res.nodes[0], "index_id")
assert res.nodes[1].node_id == "bbb"
assert isinstance(res.nodes[1], TextNode)
|
_base_ = '../grid_rcnn/grid-rcnn_r50_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2)),
grid_roi_extractor=dict(
type='GenericRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2))))
|
_base_ = '../grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(
type='GenericRoIExtractor',
aggregation='sum',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2)),
grid_roi_extractor=dict(
type='GenericRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='GeneralizedAttention',
in_channels=256,
spatial_range=-1,
num_heads=6,
attention_type='0100',
kv_stride=2))))
|
import numpy as np
import orjson
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import NdArray
def test_proto_tensor():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(NdArray, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(NdArray)
def test_dump_json():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(NdArray, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, NdArray)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, NdArray)
assert (ndarray == np.zeros((3, 224, 224))).all()
|
import numpy as np
import orjson
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import Tensor
def test_proto_tensor():
tensor = parse_obj_as(Tensor, np.zeros((3, 224, 224)))
tensor._to_node_protobuf()
def test_from_list():
tensor = parse_obj_as(Tensor, [[0.0, 0.0], [0.0, 0.0]])
assert (tensor == np.zeros((2, 2))).all()
def test_json_schema():
schema_json_of(Tensor)
def test_dump_json():
tensor = parse_obj_as(Tensor, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
def test_load_json():
tensor = parse_obj_as(Tensor, np.zeros((2, 2)))
json = orjson_dumps(tensor)
print(json)
print(type(json))
new_tensor = orjson.loads(json)
assert (new_tensor == tensor).all()
def test_unwrap():
tensor = parse_obj_as(Tensor, np.zeros((3, 224, 224)))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, Tensor)
assert isinstance(ndarray, np.ndarray)
assert isinstance(tensor, Tensor)
assert (ndarray == np.zeros((3, 224, 224))).all()
|
from __future__ import annotations
import logging
from typing import Literal
import torch
from torch import Tensor
from sentence_transformers.models.InputModule import InputModule
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(InputModule):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
save_in_root: bool = False
config_keys: list[str] = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
def __init__(
self,
vocab: list[str],
word_weights: dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super().__init__()
vocab = list(dict.fromkeys(vocab)) # Ensure vocab is unique
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
f"{num_unknown_words} out of {len(vocab)} words without a weighting value. Set weight to {unknown_word_weight}"
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
    def tokenize(self, texts: list[str], **kwargs) -> dict[Literal["sentence_embedding"], torch.Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: list[list[int]], pad_seq_length: int = 0
) -> dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
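if __name__ == "__main__":
    # Hedged usage sketch: a two-word vocabulary with the default uniform
    # weights. "hello" occurs twice, so with cumulative_term_frequency=True
    # its slot accumulates to 2.0.
    bow = BoW(vocab=["hello", "world"])
    features = bow.tokenize(["hello hello world"])
    print(features["sentence_embedding"])  # expected: tensor([[2., 1.]])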
|
from __future__ import annotations
import json
import logging
import os
from typing import Literal
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: list[str],
word_weights: dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super().__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
f"{num_unknown_words} out of {len(vocab)} words without a weighting value. Set weight to {unknown_word_weight}"
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
    def tokenize(self, texts: list[str], **kwargs) -> dict[Literal["sentence_embedding"], torch.Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: list[list[int]], pad_seq_length: int = 0
) -> dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses Matryoshka2dLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/2d_matryoshka_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, train_loss, [768, 512, 256, 128, 64])
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.save_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.save_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses Matryoshka2dLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/2d_matryoshka_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, train_loss, [768, 512, 256, 128, 64])
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.save_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.save_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('stem.conv', 'conv1')
new_key = new_key.replace('stem.bn', 'bn1')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('head.fc', 'fc')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
split_keys = model_key.split('.')
layer, block, module = split_keys[:3]
block_id = int(block[1:])
layer_name = f'layer{int(layer[1:])}'
block_name = f'{block_id - 1}'
if block_id == 1 and module == 'bn':
new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
elif block_id == 1 and module == 'proj':
new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
elif module == 'f':
if split_keys[3] == 'a_bn':
module_name = 'bn1'
elif split_keys[3] == 'b_bn':
module_name = 'bn2'
elif split_keys[3] == 'c_bn':
module_name = 'bn3'
elif split_keys[3] == 'a':
module_name = 'conv1'
elif split_keys[3] == 'b':
module_name = 'conv2'
elif split_keys[3] == 'c':
module_name = 'conv3'
new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
print(f'Convert {model_key} to {new_key}')
state_dict[new_key] = model_weight
converted_names.add(model_key)
def convert(src, dst):
"""Convert keys in pycls pretrained RegNet models to mmdet style."""
    # load pycls model checkpoint
regnet_model = torch.load(src)
blobs = regnet_model['model_state']
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'stem' in key:
convert_stem(key, weight, state_dict, converted_names)
elif 'head' in key:
convert_head(key, weight, state_dict, converted_names)
elif key.startswith('s'):
convert_reslayer(key, weight, state_dict, converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src pycls model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
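# Example invocation (hypothetical script and file names):
#   python regnet2mmdet.py RegNetX-800MF_dds_8gpu.pyth regnetx_800mf_mmdet.pth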
|
import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('stem.conv', 'conv1')
new_key = new_key.replace('stem.bn', 'bn1')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('head.fc', 'fc')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
split_keys = model_key.split('.')
layer, block, module = split_keys[:3]
block_id = int(block[1:])
layer_name = f'layer{int(layer[1:])}'
block_name = f'{block_id - 1}'
if block_id == 1 and module == 'bn':
new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
elif block_id == 1 and module == 'proj':
new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
elif module == 'f':
if split_keys[3] == 'a_bn':
module_name = 'bn1'
elif split_keys[3] == 'b_bn':
module_name = 'bn2'
elif split_keys[3] == 'c_bn':
module_name = 'bn3'
elif split_keys[3] == 'a':
module_name = 'conv1'
elif split_keys[3] == 'b':
module_name = 'conv2'
elif split_keys[3] == 'c':
module_name = 'conv3'
new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
print(f'Convert {model_key} to {new_key}')
state_dict[new_key] = model_weight
converted_names.add(model_key)
def convert(src, dst):
"""Convert keys in pycls pretrained RegNet models to mmdet style."""
    # load pycls model checkpoint
regnet_model = torch.load(src)
blobs = regnet_model['model_state']
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'stem' in key:
convert_stem(key, weight, state_dict, converted_names)
elif 'head' in key:
convert_head(key, weight, state_dict, converted_names)
elif key.startswith('s'):
convert_reslayer(key, weight, state_dict, converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src pycls model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(MaskRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(MaskRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer, util
class DistillKLDivLoss(nn.Module):
    """KL-divergence distillation loss: trains the student so that the
    softmax over its query-passage scores matches the teacher's precomputed
    score distribution (see the citation below)."""
def __init__(self, model: SentenceTransformer, similarity_fct=util.pairwise_dot_score) -> None:
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.loss_fct = nn.KLDivLoss(reduction="none")
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings, labels)
def compute_loss_from_embeddings(self, embeddings: list[Tensor], labels: Tensor) -> Tensor:
embeddings_query = embeddings[0]
embeddings_pos = embeddings[1]
embeddings_negs = embeddings[2:]
# Compute student scores
student_scores_pos = self.similarity_fct(embeddings_query, embeddings_pos)
if len(embeddings_negs) == 1:
# Single negative case
student_scores_neg = self.similarity_fct(embeddings_query, embeddings_negs[0])
else:
# Multiple negatives case
student_scores_neg = torch.stack(
[self.similarity_fct(embeddings_query, neg) for neg in embeddings_negs],
dim=1,
)
# Teacher scores
teacher_pos_scores = labels[:, 0].unsqueeze(1)
teacher_neg_scores = labels[:, 1:]
# Prepare student scores to match teacher scores shape
student_scores_pos = student_scores_pos.unsqueeze(1)
# Create log probabilities for student scores
        if len(embeddings_negs) == 1:
            student_scores_neg = student_scores_neg.unsqueeze(1)
        student_scores = torch.cat([student_scores_pos, student_scores_neg], dim=1)
student_log_probs = torch.log_softmax(student_scores, dim=1)
# Create probabilities for teacher scores
teacher_scores = torch.cat([teacher_pos_scores, teacher_neg_scores], dim=1)
teacher_probs = torch.softmax(teacher_scores, dim=1)
# KL Divergence
loss = self.loss_fct(student_log_probs, teacher_probs).sum(dim=1).mean()
return loss
@property
def citation(self) -> str:
return """
@misc{lin2020distillingdenserepresentationsranking,
title={Distilling Dense Representations for Ranking using Tightly-Coupled Teachers},
author={Sheng-Chieh Lin and Jheng-Hong Yang and Jimmy Lin},
year={2020},
eprint={2010.11386},
archivePrefix={arXiv},
primaryClass={cs.IR},
url={https://arxiv.org/abs/2010.11386},
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer, util
class DistillKLDivLoss(nn.Module):
    """KL-divergence distillation loss: the student's log-softmax over the
    positive and negative similarity scores is matched against the softmax
    of precomputed teacher scores (Lin et al., 2020, arXiv:2010.11386)."""
def __init__(self, model: SentenceTransformer, similarity_fct=util.pairwise_dot_score) -> None:
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.loss_fct = nn.KLDivLoss(reduction="none")
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings, labels)
def compute_loss_from_embeddings(self, embeddings: list[Tensor], labels: Tensor) -> Tensor:
embeddings_query = embeddings[0]
embeddings_pos = embeddings[1]
embeddings_neg = embeddings[2]
# Compute student scores
student_scores_pos = self.similarity_fct(embeddings_query, embeddings_pos)
student_scores_neg = self.similarity_fct(embeddings_query, embeddings_neg)
# Pack into one tensor and apply log_softmax
student_scores = torch.stack([student_scores_pos, student_scores_neg], dim=1)
student_log_probs = torch.log_softmax(student_scores, dim=1)
# Labels contain teacher similarity scores (already computed before training)
# We expect labels to contain the teacher_pos_score and teacher_neg_score
teacher_pos_scores = labels[:, 0]
teacher_neg_scores = labels[:, 1]
teacher_scores = torch.stack([teacher_pos_scores, teacher_neg_scores], dim=1)
teacher_probs = torch.softmax(teacher_scores, dim=1)
# KL Divergence
loss = self.loss_fct(student_log_probs, teacher_probs).sum(dim=1).mean()
return loss
@property
def citation(self) -> str:
return """
@misc{lin2020distillingdenserepresentationsranking,
title={Distilling Dense Representations for Ranking using Tightly-Coupled Teachers},
author={Sheng-Chieh Lin and Jheng-Hong Yang and Jimmy Lin},
year={2020},
eprint={2010.11386},
archivePrefix={arXiv},
primaryClass={cs.IR},
url={https://arxiv.org/abs/2010.11386},
}
"""
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the prefix (LMDB and Memcached are
# not supported yet)
# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'
# Method 2: use `backend_args` (named `file_client_args` in versions
# before 3.0.0rc6)
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/segmentation/',
# 'data/': 's3://openmmlab/datasets/segmentation/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this step from the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this step from the pipeline
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox')
test_evaluator = val_evaluator
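# Sketch (the file path is an assumption about where this config lives):
# loading the config with mmengine and inspecting the repeated dataset.
#
#   from mmengine.config import Config
#   cfg = Config.fromfile('configs/_base_/datasets/cityscapes_detection.py')
#   cfg.train_dataloader.dataset.times  # 8: one epoch passes the data 8x
#   cfg.val_evaluator.ann_file          # already joined with data_root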
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.registry import MODELS
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
reg_dim = self.bbox_coder.encode_size
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * reg_dim, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
                cls_score (Tensor): Cls scores for a single scale level;
                    the number of channels is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level; the number of channels is num_anchors * 4.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.registry import MODELS
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
                cls_score (Tensor): Cls scores for a single scale level;
                    the number of channels is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level; the number of channels is num_anchors * 4.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
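# Toy shape check expanding the docstring example (assumption: default
# AnchorHead settings, i.e. sigmoid classification and a 4-dim delta coder):
if __name__ == '__main__':
    import torch
    head = RetinaHead(num_classes=11, in_channels=7)
    feat = torch.rand(1, 7, 32, 32)
    cls_score, bbox_pred = head.forward_single(feat)
    # 9 base priors per location (3 octave scales x 3 aspect ratios)
    assert cls_score.shape[1] == head.num_base_priors * 11
    assert bbox_pred.shape[1] == head.num_base_priors * 4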
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
# torch returns pivots with 1-based indexing
return LU, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if b.ndim == a.ndim - 1:
b = torch.unsqueeze(b, axis=-1)
return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
axis=-1
)
return torch.linalg.solve_triangular(a, b, upper=not lower)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
return torch.linalg.svdvals(x)
return torch.linalg.svd(x, full_matrices=full_matrices)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return torch.linalg.lstsq(a, b, rcond=rcond)[0]
|
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x):
return torch.linalg.cholesky(x)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
LU, pivots = torch.linalg.lu_factor(x)
    # torch returns pivots with 1-based indexing
return LU, pivots - 1
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if b.ndim == a.ndim - 1:
b = torch.unsqueeze(b, axis=-1)
return torch.linalg.solve_triangular(a, b, upper=not lower).squeeze(
axis=-1
)
return torch.linalg.solve_triangular(a, b, upper=not lower)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
return torch.linalg.svdvals(x)
return torch.linalg.svd(x, full_matrices=full_matrices)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return torch.linalg.lstsq(a, b, rcond=rcond)[0]
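# Tiny sanity checks for two of the wrappers above (the data is arbitrary):
if __name__ == "__main__":
    a = torch.tensor([[4.0, 3.0], [6.0, 3.0]])
    LU, pivots = lu_factor(a)
    assert int(pivots.min()) >= 0  # shifted down from torch's 1-based pivots
    x = torch.arange(3)            # int64 input
    print(norm(x).dtype)           # cast to floatx() before computing norm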
|