input
stringlengths 33
5k
| output
stringlengths 32
5k
|
---|---|
from typing import TYPE_CHECKING, Union, BinaryIO
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blob(self: 'T', **kwargs) -> 'T':
"""Convert :attr:`.uri` to :attr:`.blob` inplace.
Internally it downloads from the URI and set :attr:`blob`.
:param kwargs: keyword arguments to pass to `:meth:_uri_to_blob` such as timeout
:return: itself after processed
"""
self.blob = _uri_to_blob(self.uri, **kwargs)
return self
def convert_blob_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.blob` to data :attr:`.uri` in place.
Internally it first reads into blob and then converts it to data URI.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that
frequently uses non-US-ASCII characters.
:return: itself after processed
"""
if not self.mime_type:
raise ValueError(
f'{self.mime_type} is unset, can not convert it to data uri'
)
self.uri = _to_datauri(self.mime_type, self.blob, charset, base64, binary=True)
return self
def save_blob_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.blob` into a file
:param file: File or filename to which the data is saved.
:return: itself after processed
"""
fp = _get_file_context(file)
with fp:
fp.write(self.blob)
return self
|
from typing import TYPE_CHECKING, Union, BinaryIO
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class BlobDataMixin:
"""Provide helper functions for :class:`Document` to handle binary data."""
def load_uri_to_blob(self: 'T', **kwargs) -> 'T':
"""Convert :attr:`.uri` to :attr:`.blob` inplace.
Internally it downloads from the URI and set :attr:`blob`.
:param kwargs: keyword arguments to pass to `:meth:_uri_to_blob` such as timeout
:return: itself after processed
"""
self.blob = _uri_to_blob(self.uri, **kwargs)
return self
def convert_blob_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.blob` to data :attr:`.uri` in place.
Internally it first reads into blob and then converts it to data URI.
:param charset: charset may be any character set registered with IANA
:param base64: used to encode arbitrary octet sequences into a form that satisfies the rules of 7bit.
Designed to be efficient for non-text 8 bit and binary data. Sometimes used for text data that
frequently uses non-US-ASCII characters.
:return: itself after processed
"""
if not self.mime_type:
raise ValueError(
f'{self.mime_type} is unset, can not convert it to data uri'
)
self.uri = _to_datauri(self.mime_type, self.blob, charset, base64, binary=True)
return self
def save_blob_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.blob` into a file
:param file: File or filename to which the data is saved.
:return: itself after processed
"""
fp = _get_file_context(file)
with fp:
fp.write(self.blob)
return self
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import re
from typing import Dict, List, Optional
from jina import Document, DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
class Sentencizer(Executor):
"""
:class:`Sentencizer` split the text on the doc-level
into sentences on the chunk-level with a rule-base strategy.
The text is split by the punctuation characters listed in ``punct_chars``.
The sentences that are shorter than the ``min_sent_len``
or longer than the ``max_sent_len`` after stripping will be discarded.
"""
def __init__(
self,
min_sent_len: int = 1,
max_sent_len: int = 512,
punct_chars: Optional[List[str]] = None,
uniform_weight: bool = True,
traversal_paths: str = '@r',
*args,
**kwargs
):
"""
:param min_sent_len: the minimal number of characters,
(including white spaces) of the sentence, by default 1.
:param max_sent_len: the maximal number of characters,
(including white spaces) of the sentence, by default 512.
:param punct_chars: the punctuation characters to split on,
whatever is in the list will be used,
for example ['!', '.', '?'] will use '!', '.' and '?'
:param uniform_weight: the definition of it should have
uniform weight or should be calculated
:param traversal_paths: traverse path on docs, e.g. '@r', '@r,c'
"""
super().__init__(*args, **kwargs)
self.min_sent_len = min_sent_len
self.max_sent_len = max_sent_len
self.punct_chars = punct_chars
self.uniform_weight = uniform_weight
self.logger = JinaLogger(self.__class__.__name__)
self.traversal_paths = traversal_paths
if not punct_chars:
self.punct_chars = [
'!',
'.',
'?',
'։',
'؟',
'۔',
'܀',
'܁',
'܂',
'‼',
'‽',
'⁇',
'⁈',
'⁉',
'⸮',
'﹖',
'﹗',
'!',
'.',
'?',
'。',
'。',
'\n',
]
if self.min_sent_len > self.max_sent_len:
self.logger.warning(
'the min_sent_len (={}) should be smaller or equal to the max_sent_len (={})'.format(
self.min_sent_len, self.max_sent_len
)
)
self._slit_pat = re.compile(
r'\s*([^{0}]+)(?<!\s)[{0}]*'.format(''.join(set(self.punct_chars)))
)
@requests
def segment(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Split the text into sentences.
:param docs: Documents that contain the text
:param parameters: Dictionary of parameters
:param kwargs: Additional keyword arguments
:return: a list of chunk dicts with the split sentences
"""
if not docs:
return
traversal_path = parameters.get('traversal_paths', self.traversal_paths)
flat_docs = docs[traversal_path]
for doc in flat_docs:
text = doc.text
ret = [
(m.group(0), m.start(), m.end())
for m in re.finditer(self._slit_pat, text)
]
if not ret:
ret = [(text, 0, len(text))]
for ci, (r, s, e) in enumerate(ret):
f = re.sub('\n+', ' ', r).strip()
f = f[: self.max_sent_len]
if len(f) > self.min_sent_len:
doc.chunks.append(
Document(
text=f,
offset=ci,
weight=1.0 if self.uniform_weight else len(f) / len(text),
location=[s, e],
)
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import re
from typing import Dict, List, Optional, Tuple
from jina import Document, DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
class Sentencizer(Executor):
"""
:class:`Sentencizer` split the text on the doc-level
into sentences on the chunk-level with a rule-base strategy.
The text is split by the punctuation characters listed in ``punct_chars``.
The sentences that are shorter than the ``min_sent_len``
or longer than the ``max_sent_len`` after stripping will be discarded.
"""
def __init__(
self,
min_sent_len: int = 1,
max_sent_len: int = 512,
punct_chars: Optional[List[str]] = None,
uniform_weight: bool = True,
traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs
):
"""
:param min_sent_len: the minimal number of characters,
(including white spaces) of the sentence, by default 1.
:param max_sent_len: the maximal number of characters,
(including white spaces) of the sentence, by default 512.
:param punct_chars: the punctuation characters to split on,
whatever is in the list will be used,
for example ['!', '.', '?'] will use '!', '.' and '?'
:param uniform_weight: the definition of it should have
uniform weight or should be calculated
:param traversal_paths: traverse path on docs, e.g. ['r'], ['c']
"""
super().__init__(*args, **kwargs)
self.min_sent_len = min_sent_len
self.max_sent_len = max_sent_len
self.punct_chars = punct_chars
self.uniform_weight = uniform_weight
self.logger = JinaLogger(self.__class__.__name__)
self.traversal_paths = traversal_paths
if not punct_chars:
self.punct_chars = [
'!',
'.',
'?',
'։',
'؟',
'۔',
'܀',
'܁',
'܂',
'‼',
'‽',
'⁇',
'⁈',
'⁉',
'⸮',
'﹖',
'﹗',
'!',
'.',
'?',
'。',
'。',
'\n',
]
if self.min_sent_len > self.max_sent_len:
self.logger.warning(
'the min_sent_len (={}) should be smaller or equal to the max_sent_len (={})'.format(
self.min_sent_len, self.max_sent_len
)
)
self._slit_pat = re.compile(
r'\s*([^{0}]+)(?<!\s)[{0}]*'.format(''.join(set(self.punct_chars)))
)
@requests
def segment(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Split the text into sentences.
:param docs: Documents that contain the text
:param parameters: Dictionary of parameters
:param kwargs: Additional keyword arguments
:return: a list of chunk dicts with the split sentences
"""
if not docs:
return
traversal_path = parameters.get('traversal_paths', self.traversal_paths)
flat_docs = docs.traverse_flat(traversal_path)
for doc in flat_docs:
text = doc.text
ret = [
(m.group(0), m.start(), m.end())
for m in re.finditer(self._slit_pat, text)
]
if not ret:
ret = [(text, 0, len(text))]
for ci, (r, s, e) in enumerate(ret):
f = re.sub('\n+', ' ', r).strip()
f = f[: self.max_sent_len]
if len(f) > self.min_sent_len:
doc.chunks.append(
Document(
text=f,
offset=ci,
weight=1.0 if self.uniform_weight else len(f) / len(text),
location=[s, e],
)
)
|
_base_ = 'grounding_dino_swin-t_pretrain_obj365.py'
o365v1_od_dataset = dict(
type='ODVGDataset',
data_root='data/objects365v1/',
ann_file='o365v1_train_odvg.json',
label_map_file='o365v1_label_map.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None,
)
flickr30k_dataset = dict(
type='ODVGDataset',
data_root='data/flickr30k_entities/',
ann_file='final_flickr_separateGT_train_vg.json',
label_map_file=None,
data_prefix=dict(img='flickr30k_images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
gqa_dataset = dict(
type='ODVGDataset',
data_root='data/gqa/',
ann_file='final_mixed_train_no_coco_vg.json',
label_map_file=None,
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
train_dataloader = dict(
dataset=dict(datasets=[o365v1_od_dataset, flickr30k_dataset, gqa_dataset]))
|
_base_ = 'grounding_dino_swin-t_pretrain_obj365.py'
o365v1_od_dataset = dict(
type='ODVGDataset',
data_root='data/objects365v1/',
ann_file='o365v1_train_odvg.jsonl',
label_map_file='o365v1_label_map.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None,
)
flickr30k_dataset = dict(
type='ODVGDataset',
data_root='data/flickr30k_entities/',
ann_file='final_flickr_separateGT_train_vg.json',
label_map_file=None,
data_prefix=dict(img='flickr30k_images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
gqa_dataset = dict(
type='ODVGDataset',
data_root='data/gqa/',
ann_file='final_mixed_train_no_coco_vg.json',
label_map_file=None,
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
train_dataloader = dict(
dataset=dict(datasets=[o365v1_od_dataset, flickr30k_dataset, gqa_dataset]))
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.10.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.10.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
The config of the executor, it could be one of the followings:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--uses-dynamic-batching',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `dynamic_batching` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/concepts/executor/executor-files/>`__
''',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
gp.add_argument(
'--no-reduce',
'--disable-reduce',
action='store_true',
default=False,
help='Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`',
)
mixin_base_runtime_parser(gp)
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
The config of the executor, it could be one of the followings:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/concepts/executor/executor-files/>`__
''',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
gp.add_argument(
'--no-reduce',
'--disable-reduce',
action='store_true',
default=False,
help='Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`',
)
mixin_base_runtime_parser(gp)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .mask2former_track_head import Mask2FormerTrackHead
from .quasi_dense_embed_head import QuasiDenseEmbedHead
from .quasi_dense_track_head import QuasiDenseTrackHead
__all__ = [
'QuasiDenseEmbedHead', 'QuasiDenseTrackHead', 'Mask2FormerTrackHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .quasi_dense_embed_head import QuasiDenseEmbedHead
from .quasi_dense_track_head import QuasiDenseTrackHead
__all__ = ['QuasiDenseEmbedHead', 'QuasiDenseTrackHead']
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
from mmdet.datasets import CocoPanopticDataset
def _create_panoptic_style_json(json_name):
image1 = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name1.jpg',
}
image2 = {
'id': 1,
'width': 640,
'height': 800,
'file_name': 'fake_name2.jpg',
}
images = [image1, image2]
annotations = [
{
'segments_info': [{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0
}, {
'id': 2,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0
}, {
'id': 3,
'category_id': 2,
'iscrowd': 0,
'bbox': [1, 189, 612, 285],
'area': 70036
}],
'file_name':
'fake_name1.jpg',
'image_id':
0
},
{
'segments_info': [
{
# Different to instance style json, there
# are duplicate ids in panoptic style json
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0
},
{
'id': 4,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 1
},
{
'id': 5,
'category_id': 2,
'iscrowd': 0,
'bbox': [100, 200, 200, 300],
'area': 66666
},
{
'id': 6,
'category_id': 0,
'iscrowd': 0,
'bbox': [1, 189, -10, 285],
'area': 70036
}
],
'file_name':
'fake_name2.jpg',
'image_id':
1
}
]
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
'isthing': 1
}, {
'id': 1,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}, {
'id': 2,
'name': 'wall',
'supercategory': 'wall',
'isthing': 0
}]
fake_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
mmcv.dump(fake_json, json_name)
return fake_json
def test_load_panoptic_style_json():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
fake_json = _create_panoptic_style_json(fake_json_file)
dataset = CocoPanopticDataset(
ann_file=fake_json_file,
classes=[cat['name'] for cat in fake_json['categories']],
pipeline=[])
ann = dataset.get_ann_info(0)
# two legal instances
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 2
# three masks for both foreground and background
assert len(ann['masks']) == 3
ann = dataset.get_ann_info(1)
# one legal instance, one illegal instance,
# one crowd instance and one background mask
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 1
assert ann['bboxes_ignore'].shape[0] == 1
assert len(ann['masks']) == 3
|
import os.path as osp
import tempfile
import mmcv
from mmdet.datasets import CocoPanopticDataset
def _create_panoptic_style_json(json_name):
image1 = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name1.jpg',
}
image2 = {
'id': 1,
'width': 640,
'height': 800,
'file_name': 'fake_name2.jpg',
}
images = [image1, image2]
annotations = [
{
'segments_info': [{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0
}, {
'id': 2,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0
}, {
'id': 3,
'category_id': 2,
'iscrowd': 0,
'bbox': [1, 189, 612, 285],
'area': 70036
}],
'file_name':
'fake_name1.jpg',
'image_id':
0
},
{
'segments_info': [
{
# Different to instance style json, there
# are duplicate ids in panoptic style json
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0
},
{
'id': 4,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 1
},
{
'id': 5,
'category_id': 2,
'iscrowd': 0,
'bbox': [100, 200, 200, 300],
'area': 66666
},
{
'id': 6,
'category_id': 0,
'iscrowd': 0,
'bbox': [1, 189, -10, 285],
'area': 70036
}
],
'file_name':
'fake_name2.jpg',
'image_id':
1
}
]
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
'isthing': 1
}, {
'id': 1,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}, {
'id': 2,
'name': 'wall',
'supercategory': 'wall',
'isthing': 0
}]
fake_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
mmcv.dump(fake_json, json_name)
return fake_json
def test_load_panoptic_style_json():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
fake_json = _create_panoptic_style_json(fake_json_file)
dataset = CocoPanopticDataset(
ann_file=fake_json_file,
classes=[cat['name'] for cat in fake_json['categories']],
pipeline=[])
ann = dataset.get_ann_info(0)
# two legal instances
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 2
# three masks for both foreground and background
assert len(ann['masks']) == 3
ann = dataset.get_ann_info(1)
# one legal instance, one illegal instance,
# one crowd instance and one background mask
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 1
assert ann['bboxes_ignore'].shape[0] == 1
assert len(ann['masks']) == 3
|
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain_core.indexing.base import (
DeleteResponse,
DocumentIndex,
InMemoryRecordManager,
RecordManager,
UpsertResponse,
)
__all__ = (
"DeleteResponse",
"DocumentIndex",
"InMemoryRecordManager",
"IndexingResult",
"RecordManager",
"UpsertResponse",
"aindex",
"index",
)
_dynamic_imports = {
"aindex": "api",
"index": "api",
"IndexingResult": "api",
"DeleteResponse": "base",
"DocumentIndex": "base",
"InMemoryRecordManager": "base",
"RecordManager": "base",
"UpsertResponse": "base",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain_core.indexing.base import (
DeleteResponse,
DocumentIndex,
InMemoryRecordManager,
RecordManager,
UpsertResponse,
)
__all__ = (
"aindex",
"DeleteResponse",
"DocumentIndex",
"index",
"IndexingResult",
"InMemoryRecordManager",
"RecordManager",
"UpsertResponse",
)
_dynamic_imports = {
"aindex": "api",
"index": "api",
"IndexingResult": "api",
"DeleteResponse": "base",
"DocumentIndex": "base",
"InMemoryRecordManager": "base",
"RecordManager": "base",
"UpsertResponse": "base",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, filename, revision):
url = hf_hub_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not mutilingual but hope to see some on the hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.60
Model Sparsity: Active Dimensions: 112.3, Sparsity Ratio: 0.9963
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4450
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model, not mutilingual but hope to see some on the hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
Model Sparsity: Active Dimensions: 113.6, Sparsity Ratio: 0.9963
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
import logging
import os
from contextlib import asynccontextmanager
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn_retry
load_dotenv()
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA
prisma = Prisma(auto_register=True)
logger = logging.getLogger(__name__)
@conn_retry("Prisma", "Acquiring connection")
async def connect():
if prisma.is_connected():
return
await prisma.connect()
if not prisma.is_connected():
raise ConnectionError("Failed to connect to Prisma.")
@conn_retry("Prisma", "Releasing connection")
async def disconnect():
if not prisma.is_connected():
return
await prisma.disconnect()
if prisma.is_connected():
raise ConnectionError("Failed to disconnect from Prisma.")
@asynccontextmanager
async def transaction():
async with prisma.tx() as tx:
yield tx
class BaseDbModel(BaseModel):
id: str = Field(default_factory=lambda: str(uuid4()))
@field_validator("id", mode="before")
def set_model_id(cls, id: str) -> str:
# In case an empty ID is submitted
return id or str(uuid4())
|
import logging
import os
from contextlib import asynccontextmanager
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn_retry
load_dotenv()
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA
prisma = Prisma(auto_register=True)
logger = logging.getLogger(__name__)
@conn_retry("Prisma", "Acquiring connection")
async def connect():
if prisma.is_connected():
return
await prisma.connect()
@conn_retry("Prisma", "Releasing connection")
async def disconnect():
if not prisma.is_connected():
return
await prisma.disconnect()
@asynccontextmanager
async def transaction():
async with prisma.tx() as tx:
yield tx
class BaseDbModel(BaseModel):
id: str = Field(default_factory=lambda: str(uuid4()))
@field_validator("id", mode="before")
def set_model_id(cls, id: str) -> str:
# In case an empty ID is submitted
return id or str(uuid4())
|
from pydantic import AnyUrl as BaseAnyUrl
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that need to
be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(text=str(self))
|
from pydantic import AnyUrl as BaseAnyUrl
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should
be called when the Document is nested into another Document that need to
be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(text=str(self))
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='Text')
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an AnyEmbedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import Text
txt_doc = Text('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import Text
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents Text
doc = Text(text='This is the main text', url='exampleurl.com')
doc2 = Text(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `Text` behave the same as an `str`.
.. code-block:: python
from docarray.documents import Text
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='Text')
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an AnyEmbedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can extend this Document:
.. code-block:: python
from docarray.documents import Text
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents Text
doc = Text(text='This is the main text', url='exampleurl.com')
doc2 = Text(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `Text` behave the same as an `str`.
.. code-block:: python
from docarray.documents import Text
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform as affine_transform
from keras.src.ops.image import crop_images as crop_images
from keras.src.ops.image import elastic_transform as elastic_transform
from keras.src.ops.image import extract_patches as extract_patches
from keras.src.ops.image import gaussian_blur as gaussian_blur
from keras.src.ops.image import hsv_to_rgb as hsv_to_rgb
from keras.src.ops.image import map_coordinates as map_coordinates
from keras.src.ops.image import pad_images as pad_images
from keras.src.ops.image import perspective_transform as perspective_transform
from keras.src.ops.image import resize as resize
from keras.src.ops.image import rgb_to_grayscale as rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv as rgb_to_hsv
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import elastic_transform
from keras.src.ops.image import extract_patches
from keras.src.ops.image import gaussian_blur
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import DropPath
from mmcv.runner import BaseModule
from .se_layer import SELayer
class InvertedResidual(BaseModule):
"""Inverted Residual Block.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
with_expand_conv (bool): Use expand conv or not. If set False,
mid_channels must be the same with in_channels.
Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
drop_path_rate (float): stochastic depth rate. Defaults to 0.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
drop_path_rate=0.,
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2], f'stride must in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.drop_path = DropPath(
drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + self.drop_path(out)
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .se_layer import SELayer
class InvertedResidual(BaseModule):
"""Inverted Residual Block.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
with_expand_conv (bool): Use expand conv or not. If set False,
mid_channels must be the same with in_channels.
Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2], f'stride must in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
|
import json
import aioboto3.session
import pytest
import aioboto3
from llama_index.embeddings.bedrock import BedrockEmbedding, Models
EXP_REQUEST = "foo bar baz"
EXP_RESPONSE = {
"embedding": [
0.017410278,
0.040924072,
-0.007507324,
0.09429932,
0.015304565,
]
}
class AsyncMockStreamReader:
async def read(self):
return json.dumps(EXP_RESPONSE).encode()
class AsyncMockClient:
async def __aenter__(self) -> "AsyncMockClient":
return self
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
pass
async def invoke_model(self, *args, **kwargs):
return {"contentType": "application/json", "body": AsyncMockStreamReader()}
class AsyncMockSession:
def __init__(self, *args, **kwargs) -> "AsyncMockSession":
pass
def client(self, *args, **kwargs):
return AsyncMockClient()
@pytest.fixture()
def mock_aioboto3_session(monkeypatch):
monkeypatch.setattr("aioboto3.Session", AsyncMockSession)
@pytest.fixture()
def bedrock_embedding(mock_aioboto3_session):
return BedrockEmbedding(
model_name=Models.TITAN_EMBEDDING,
client=aioboto3.Session().client("bedrock-runtime", region_name="us-east-1"),
)
@pytest.mark.asyncio
async def test_aget_text_embedding(bedrock_embedding):
response = await bedrock_embedding._aget_text_embedding(EXP_REQUEST)
assert response == EXP_RESPONSE["embedding"]
|
import json
import aioboto3.session
import pytest
import aioboto3
from llama_index.embeddings.bedrock import BedrockEmbedding, Models
EXP_REQUEST = "foo bar baz"
EXP_RESPONSE = {
"embedding": [
0.017410278,
0.040924072,
-0.007507324,
0.09429932,
0.015304565,
]
}
class AsyncMockStreamReader:
async def read(self):
return json.dumps(EXP_RESPONSE).encode()
class AsyncMockClient:
async def __aenter__(self) -> "AsyncMockClient":
return self
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
pass
async def invoke_model(self, *args, **kwargs):
return {"contentType": "application/json", "body": AsyncMockStreamReader()}
class AsyncMockSession:
def __init__(self, *args, **kwargs) -> "AsyncMockSession":
pass
def client(self, *args, **kwargs):
return AsyncMockClient()
@pytest.fixture()
def mock_aioboto3_session(monkeypatch):
monkeypatch.setattr("aioboto3.Session", AsyncMockSession)
@pytest.fixture()
def bedrock_embedding(mock_aioboto3_session):
return BedrockEmbedding(
model_name=Models.TITAN_EMBEDDING,
client=aioboto3.Session().client("bedrock-runtime", region_name="us-east-1"),
)
@pytest.mark.asyncio()
async def test_aget_text_embedding(bedrock_embedding):
response = await bedrock_embedding._aget_text_embedding(EXP_REQUEST)
assert response == EXP_RESPONSE["embedding"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def after_iter(self, runner):
if self.every_n_iters(runner, self.interval):
# in Byte
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information: virtual memory, swap
memory and memory of current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def after_iter(self, runner):
if self.every_n_iters(runner, self.interval):
# in Byte
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
'MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
|
_base_ = '../mask_rcnn/mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, True),
position='after_conv3')
]))
|
# pylint: disable=protected-access
"""Shared typing definition."""
import ctypes
import os
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
# os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/
# cudf.DataFrame/cupy.array/dlpack
import numpy as np
DataType = Any
FeatureInfo = Sequence[str]
FeatureNames = FeatureInfo
FeatureTypes = FeatureInfo
BoosterParam = Union[List, Dict[str, Any]] # better be sequence
ArrayLike = Any
if TYPE_CHECKING:
PathLike = Union[str, os.PathLike[str]]
else:
PathLike = Union[str, os.PathLike]
CupyT = ArrayLike # maybe need a stub for cupy arrays
NumpyOrCupy = Any
NumpyDType = Union[str, Type[np.number]] # pylint: disable=invalid-name
PandasDType = Any # real type is pandas.core.dtypes.base.ExtensionDtype
FloatCompatible = Union[float, np.float32, np.float64]
# typing.SupportsInt is not suitable here since floating point values are convertible to
# integers as well.
Integer = Union[int, np.integer]
IterationRange = Tuple[Integer, Integer]
# callables
FPreProcCallable = Callable
# ctypes
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64 # pylint: disable=C0103
ModelIn = Union[os.PathLike[AnyStr], bytearray, str]
CTypeT = TypeVar(
"CTypeT",
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_uint,
ctypes.c_size_t,
)
# supported numeric types
CNumeric = Union[
ctypes.c_float,
ctypes.c_double,
ctypes.c_uint,
ctypes.c_uint64,
ctypes.c_int32,
ctypes.c_int64,
]
# c pointer types
if TYPE_CHECKING:
CStrPtr = ctypes._Pointer[ctypes.c_char]
CStrPptr = ctypes._Pointer[ctypes.c_char_p]
CFloatPtr = ctypes._Pointer[ctypes.c_float]
CNumericPtr = Union[
ctypes._Pointer[ctypes.c_float],
ctypes._Pointer[ctypes.c_double],
ctypes._Pointer[ctypes.c_uint],
ctypes._Pointer[ctypes.c_uint64],
ctypes._Pointer[ctypes.c_int32],
ctypes._Pointer[ctypes.c_int64],
]
else:
CStrPtr = ctypes._Pointer
CStrPptr = ctypes._Pointer
CFloatPtr = ctypes._Pointer
CNumericPtr = Union[
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
]
# The second arg is actually Optional[List[cudf.Series]], skipped for easier type check.
# The cudf Series is the obtained cat codes, preserved in the `DataIter` to prevent it
# being freed.
TransformedData = Tuple[
Any, Optional[List], Optional[FeatureNames], Optional[FeatureTypes]
]
# template parameter
_T = TypeVar("_T")
_F = TypeVar("_F", bound=Callable[..., Any])
|
# pylint: disable=protected-access
"""Shared typing definition."""
import ctypes
import os
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
# os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/
# cudf.DataFrame/cupy.array/dlpack
import numpy as np
DataType = Any
FeatureInfo = Sequence[str]
FeatureNames = FeatureInfo
FeatureTypes = FeatureInfo
BoosterParam = Union[List, Dict[str, Any]] # better be sequence
ArrayLike = Any
PathLike = Union[str, os.PathLike]
CupyT = ArrayLike # maybe need a stub for cupy arrays
NumpyOrCupy = Any
NumpyDType = Union[str, Type[np.number]] # pylint: disable=invalid-name
PandasDType = Any # real type is pandas.core.dtypes.base.ExtensionDtype
FloatCompatible = Union[float, np.float32, np.float64]
# typing.SupportsInt is not suitable here since floating point values are convertible to
# integers as well.
Integer = Union[int, np.integer]
IterationRange = Tuple[Integer, Integer]
# callables
FPreProcCallable = Callable
# ctypes
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64 # pylint: disable=C0103
ModelIn = Union[str, bytearray, os.PathLike]
CTypeT = TypeVar(
"CTypeT",
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_uint,
ctypes.c_size_t,
)
# supported numeric types
CNumeric = Union[
ctypes.c_float,
ctypes.c_double,
ctypes.c_uint,
ctypes.c_uint64,
ctypes.c_int32,
ctypes.c_int64,
]
# c pointer types
if TYPE_CHECKING:
CStrPtr = ctypes._Pointer[ctypes.c_char]
CStrPptr = ctypes._Pointer[ctypes.c_char_p]
CFloatPtr = ctypes._Pointer[ctypes.c_float]
CNumericPtr = Union[
ctypes._Pointer[ctypes.c_float],
ctypes._Pointer[ctypes.c_double],
ctypes._Pointer[ctypes.c_uint],
ctypes._Pointer[ctypes.c_uint64],
ctypes._Pointer[ctypes.c_int32],
ctypes._Pointer[ctypes.c_int64],
]
else:
CStrPtr = ctypes._Pointer
CStrPptr = ctypes._Pointer
CFloatPtr = ctypes._Pointer
CNumericPtr = Union[
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
]
# The second arg is actually Optional[List[cudf.Series]], skipped for easier type check.
# The cudf Series is the obtained cat codes, preserved in the `DataIter` to prevent it
# being freed.
TransformedData = Tuple[
Any, Optional[List], Optional[FeatureNames], Optional[FeatureTypes]
]
# template parameter
_T = TypeVar("_T")
_F = TypeVar("_F", bound=Callable[..., Any])
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has a L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self._build_at_init()
def call(self, inputs):
return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)
def compute_output_shape(self, input_shape):
# Ensure axis is always treated as a list
if isinstance(self.axis, int):
axes = [self.axis]
else:
axes = self.axis
for axis in axes:
if axis >= len(input_shape) or axis < -len(input_shape):
raise ValueError(
f"Axis {self.axis} is out of bounds for "
f"input shape {input_shape}."
)
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has a L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)
def compute_output_shape(self, input_shape):
# Ensure axis is always treated as a list
if isinstance(self.axis, int):
axes = [self.axis]
else:
axes = self.axis
for axis in axes:
if axis >= len(input_shape) or axis < -len(input_shape):
raise ValueError(
f"Axis {self.axis} is out of bounds for "
f"input shape {input_shape}."
)
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
import os
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocumentArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocumentArray[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc]()
da_generator = DocumentArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
import pytest
import os
import numpy as np
from docarray import BaseDocument
from docarray.typing import NdArray
from docarray.documents import Image
from docarray import DocumentArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: Image
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocumentArray[MyDoc](
[
MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=Image(url='aux.png')),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=Image()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocumentArray[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=Image(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc]()
da_generator = DocumentArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
import pytest
from docarray import DocumentArray
@pytest.fixture
def docs():
docs = DocumentArray.empty(5)
docs[0].text = 'hello'
docs[0].tags['name'] = 'hello'
docs[1].text = 'world'
docs[1].tags['name'] = 'hello'
docs[2].tags['x'] = 0.3
docs[2].tags['y'] = 0.6
docs[3].tags['x'] = 0.8
return docs
def test_empty_filter(docs):
result = docs.find({})
assert len(result) == 5
@pytest.mark.parametrize('filter_api', [True, False])
def test_simple_filter(docs, filter_api):
if filter_api:
method = lambda query: docs.find(filter=query)
else:
method = lambda query: docs.find(query)
result = method({'text': {'$eq': 'hello'}})
assert len(result) == 1
assert result[0].text == 'hello'
result = method({'tags__x': {'$gte': 0.5}})
assert len(result) == 1
assert result[0].tags['x'] == 0.8
result = method({'tags__name': {'$regex': '^h'}})
assert len(result) == 2
assert result[1].id == docs[1].id
result = method({'text': {'$regex': '^h'}})
assert len(result) == 1
assert result[0].id == docs[0].id
result = method({'tags': {'$size': 2}})
assert result[0].id == docs[2].id
result = method({'text': {'$exists': True}})
assert len(result) == 2
result = method({'tensor': {'$exists': True}})
assert len(result) == 0
def test_logic_filter(docs):
result = docs.find({'$or': {'tags__x': {'$gte': 0.1}, 'tags__y': {'$gte': 0.5}}})
assert len(result) == 2
assert result[0].tags['x'] == 0.3 and result[1].tags['x'] == 0.8
result = docs.find({'$or': {'tags__x': {'$gte': 0.1}, 'tags__y': {'$gte': 0.5}}})
assert len(result) == 2
assert result[0].tags['x'] == 0.3
result = docs.find({'tags__x': {'$gte': 0.1, '$lte': 0.5}})
assert len(result) == 1
assert result[0].tags['y'] == 0.6
result = docs.find({'$and': {'tags__x': {'$gte': 0.1}, 'tags__y': {'$gte': 0.5}}})
assert len(result) == 1
assert result[0].tags['y'] == 0.6
result = docs.find({'$not': {'tags__x': {'$gte': 0.5}}})
assert len(result) == 4
assert 'x' not in result[0].tags or result[0].tags['x'] < 0.5
result = docs.find({'$not': {'tags__x': {'$gte': 0.1}, 'tags__y': {'$gte': 0.5}}})
assert len(result) == 4
def test_placehold_filter(docs):
result = docs.find({'text': {'$eq': '{tags__name}'}})
assert len(result) == 1
assert result[0].id == docs[0].id
|
import pytest
from docarray import DocumentArray
@pytest.fixture
def docs():
docs = DocumentArray.empty(5)
docs[0].text = 'hello'
docs[0].tags['name'] = 'hello'
docs[1].text = 'world'
docs[1].tags['name'] = 'hello'
docs[2].tags['x'] = 0.3
docs[2].tags['y'] = 0.6
docs[3].tags['x'] = 0.8
return docs
def test_empty_filter(docs):
result = docs.find({})
assert len(result) == 5
def test_simple_filter(docs):
result = docs.find({'text': {'$eq': 'hello'}})
assert len(result) == 1
assert result[0].text == 'hello'
result = docs.find({'tags__x': {'$gte': 0.5}})
assert len(result) == 1
assert result[0].tags['x'] == 0.8
result = docs.find({'tags__name': {'$regex': '^h'}})
assert len(result) == 2
assert result[1].id == docs[1].id
result = docs.find({'text': {'$regex': '^h'}})
assert len(result) == 1
assert result[0].id == docs[0].id
result = docs.find({'tags': {'$size': 2}})
assert result[0].id == docs[2].id
result = docs.find({'text': {'$exists': True}})
assert len(result) == 2
result = docs.find({'tensor': {'$exists': True}})
assert len(result) == 0
def test_logic_filter(docs):
result = docs.find({'$or': {'tags__x': {'$gte': 0.1}, 'tags__y': {'$gte': 0.5}}})
assert len(result) == 2
assert result[0].tags['x'] == 0.3 and result[1].tags['x'] == 0.8
result = docs.find({'$or': {'tags__x': {'$gte': 0.1}, 'tags__y': {'$gte': 0.5}}})
assert len(result) == 2
assert result[0].tags['x'] == 0.3
result = docs.find({'tags__x': {'$gte': 0.1, '$lte': 0.5}})
assert len(result) == 1
assert result[0].tags['y'] == 0.6
result = docs.find({'$and': {'tags__x': {'$gte': 0.1}, 'tags__y': {'$gte': 0.5}}})
assert len(result) == 1
assert result[0].tags['y'] == 0.6
result = docs.find({'$not': {'tags__x': {'$gte': 0.5}}})
assert len(result) == 4
assert 'x' not in result[0].tags or result[0].tags['x'] < 0.5
result = docs.find({'$not': {'tags__x': {'$gte': 0.1}, 'tags__y': {'$gte': 0.5}}})
assert len(result) == 4
def test_placehold_filter(docs):
result = docs.find({'text': {'$eq': '{tags__name}'}})
assert len(result) == 1
assert result[0].id == docs[0].id
|
from typing import List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
def test_encoding_cpu():
enc = AudioCLIPTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='GPU is needed for this test')
def test_encoding_gpu():
enc = AudioCLIPTextEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res).traverse_flat([path]).get_attributes('embedding')
)
for emb in embeddings:
if emb is None:
return False
return len(embeddings) == count
return validate
encoder = AudioCLIPTextEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {'traversal_paths': [traversal_path]})
assert validate_traversal(docs_per_path)(docs)
def test_encodes_semantic_meaning():
sentences = dict()
sentences["A"] = "Hello, my name is Michael."
sentences["B"] = "Today we are going to Disney World."
sentences["C"] = "There are animals on the road"
sentences["D"] = "A dog is running down the road"
encoder = AudioCLIPTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist("C", "D")
assert small_distance < dist("C", "B")
assert small_distance < dist("C", "A")
assert small_distance < dist("B", "A")
def test_multiple_traversal_paths():
sentences = list()
sentences.append('Hello, my name is Michael.')
sentences.append('Today we are going to Disney World.')
sentences.append('There are animals on the road')
sentences.append('A dog is running down the road')
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = AudioCLIPTextEncoder(default_traversal_paths=['r', 'c', 'cc'])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
assert doc.chunks[0].embedding.shape == (_EMBEDDING_DIM,)
assert doc.chunks[0].chunks[0].embedding.shape == (_EMBEDDING_DIM,)
def test_no_docs():
encoder = AudioCLIPTextEncoder()
encoder.encode(None, {})
encoder.encode(DocumentArray(), {})
|
from typing import List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from jinahub.encoder.audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
def test_encoding_cpu():
enc = AudioCLIPTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason='GPU is needed for this test')
def test_encoding_gpu():
enc = AudioCLIPTextEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res).traverse_flat([path]).get_attributes('embedding')
)
for emb in embeddings:
if emb is None:
return False
return len(embeddings) == count
return validate
encoder = AudioCLIPTextEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {'traversal_paths': [traversal_path]})
assert validate_traversal(docs_per_path)(docs)
def test_encodes_semantic_meaning():
sentences = dict()
sentences["A"] = "Hello, my name is Michael."
sentences["B"] = "Today we are going to Disney World."
sentences["C"] = "There are animals on the road"
sentences["D"] = "A dog is running down the road"
encoder = AudioCLIPTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist("C", "D")
assert small_distance < dist("C", "B")
assert small_distance < dist("C", "A")
assert small_distance < dist("B", "A")
def test_multiple_traversal_paths():
sentences = list()
sentences.append('Hello, my name is Michael.')
sentences.append('Today we are going to Disney World.')
sentences.append('There are animals on the road')
sentences.append('A dog is running down the road')
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = AudioCLIPTextEncoder(default_traversal_paths=['r', 'c', 'cc'])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
assert doc.chunks[0].embedding.shape == (_EMBEDDING_DIM,)
assert doc.chunks[0].chunks[0].embedding.shape == (_EMBEDDING_DIM,)
def test_no_docs():
encoder = AudioCLIPTextEncoder()
encoder.encode(None, {})
encoder.encode(DocumentArray(), {})
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "eurosat"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=(
"AnnualCrop",
"Forest",
"HerbaceousVegetation",
"Highway",
"Industrial",
"Pasture",
"PermanentCrop",
"Residential",
"River",
"SeaLake",
)
)
@register_dataset(NAME)
class EuroSAT(Dataset):
"""EuroSAT Dataset.
homepage="https://github.com/phelber/eurosat",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
sha256="8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 27_000
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import EncodedImage, Label
from .._api import register_dataset, register_info
NAME = "eurosat"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=(
"AnnualCrop",
"Forest",
"HerbaceousVegetation",
"Highway",
"Industrial",
"Pasture",
"PermanentCrop",
"Residential",
"River",
"SeaLake",
)
)
@register_dataset(NAME)
class EuroSAT(Dataset):
"""EuroSAT Dataset.
homepage="https://github.com/phelber/eurosat",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
sha256="8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 27_000
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict
import numpy as np
import pytest
from image_tf_encoder import ImageTFEncoder
from jina import Document, DocumentArray, Executor
input_dim = 336
target_output_dim = 1280
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_name == 'MobileNetV2'
def test_encoding_results():
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = ImageTFEncoder()
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
def test_image_results(test_images: Dict[str, np.array]):
embeddings = {}
encoder = ImageTFEncoder()
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (target_output_dim,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
@pytest.mark.gpu
def test_image_results_gpu(test_images: Dict[str, np.array]):
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = ImageTFEncoder(device='/GPU:0')
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...image_tf_encoder import ImageTFEncoder
input_dim = 336
target_output_dim = 1280
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_name == 'MobileNetV2'
def test_encoding_results():
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = ImageTFEncoder()
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
def test_image_results(test_images: Dict[str, np.array]):
embeddings = {}
encoder = ImageTFEncoder()
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
assert docs[0].embedding.shape == (target_output_dim,)
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
@pytest.mark.gpu
def test_image_results_gpu(test_images: Dict[str, np.array]):
num_doc = 2
test_data = np.random.rand(num_doc, input_dim, input_dim, 3)
doc = DocumentArray()
for i in range(num_doc):
doc.append(Document(blob=test_data[i]))
encoder = ImageTFEncoder(device='/GPU:0')
encoder.encode(doc, parameters={})
assert len(doc) == num_doc
for i in range(num_doc):
assert doc[i].embedding.shape == (target_output_dim,)
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: Optional[int] = None
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
if (
self.config.columns is not None
and self.config.features is not None
and set(self.config.columns) != set(self.config.features)
):
raise ValueError(
"The columns and features argument must contain the same columns, but got ",
f"{self.config.columns} and {self.config.features}",
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
self.info.features = datasets.Features(
{col: feat for col, feat in self.info.features.items() if col in self.config.columns}
)
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
if parquet_file.metadata.num_row_groups > 0:
batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
"""BuilderConfig for Parquet."""
batch_size: Optional[int] = None
columns: Optional[List[str]] = None
features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = ParquetConfig
def _info(self):
if (
self.config.columns is not None
and self.config.features is not None
and set(self.config.columns) != set(self.config.features)
):
raise ValueError(
"The columns and features argument must contain the same columns, but got ",
f"{self.config.columns} and {self.config.features}",
)
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""We handle string, list and dicts in datafiles"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(files):
with open(file, "rb") as f:
self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
break
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
self.info.features = datasets.Features(
{col: feat for col, feat in self.info.features.items() if col in self.config.columns}
)
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
def _generate_tables(self, files):
if self.config.features is not None and self.config.columns is not None:
if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
)
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
with open(file, "rb") as f:
parquet_file = pq.ParquetFile(f)
if parquet_file.metadata.num_row_groups > 0:
batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)
):
pa_table = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models.dense_heads import NASFCOSHead
class TestNASFCOSHead(TestCase):
def test_nasfcos_head_loss(self):
"""Tests nasfcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
nasfcos_head = NASFCOSHead(
num_classes=4,
in_channels=2, # the same as `deform_groups` in dconv3x3_config
feat_channels=2,
norm_cfg=None)
# Nasfcos head expects a multiple levels of features per image
feats = (
torch.rand(1, 2, s // stride[1], s // stride[0]).float()
for stride in nasfcos_head.prior_generator.strides)
cls_scores, bbox_preds, centernesses = nasfcos_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = nasfcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss and centerness loss should be zero
empty_cls_loss = empty_gt_losses['loss_cls'].item()
empty_box_loss = empty_gt_losses['loss_bbox'].item()
empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_loss, 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_ctr_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = nasfcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].item()
onegt_box_loss = one_gt_losses['loss_bbox'].item()
onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(onegt_ctr_loss, 0,
'centerness loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import NASFCOSHead
class TestNASFCOSHead(TestCase):
def test_nasfcos_head_loss(self):
"""Tests nasfcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
nasfcos_head = NASFCOSHead(
num_classes=4,
in_channels=2, # the same as `deform_groups` in dconv3x3_config
feat_channels=2,
norm_cfg=None)
# Nasfcos head expects a multiple levels of features per image
feats = (
torch.rand(1, 2, s // stride[1], s // stride[0]).float()
for stride in nasfcos_head.prior_generator.strides)
cls_scores, bbox_preds, centernesses = nasfcos_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = nasfcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss and centerness loss should be zero
empty_cls_loss = empty_gt_losses['loss_cls'].item()
empty_box_loss = empty_gt_losses['loss_bbox'].item()
empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_loss, 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_ctr_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = nasfcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].item()
onegt_box_loss = one_gt_losses['loss_bbox'].item()
onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(onegt_ctr_loss, 0,
'centerness loss should be non-zero')
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length, device=array.device))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
Compute the log-Mel spectrogram of
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length, device=array.device))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
Compute the log-Mel spectrogram of
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[:, :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
import io
import warnings
from abc import ABC
import numpy as np
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
if TYPE_CHECKING:
from docarray.typing.bytes.image_bytes import ImageBytes
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> 'ImageBytes':
"""
Convert image tensor to ImageBytes.
:param format: the image format use to store the image, can be 'PNG' , 'JPG' ...
:return: an ImageBytes object
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
from docarray.typing.bytes.image_bytes import ImageBytes
return ImageBytes(img_byte_arr)
def save(self, file_path: str) -> None:
"""
Save image tensor to an image file.
:param file_path: path to an image file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
comp_backend = self.get_comp_backend()
np_img = comp_backend.to_numpy(self).astype(np.uint8)
pil_img = PILImage.fromarray(np_img)
pil_img.save(file_path)
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import io
import warnings
from abc import ABC
import numpy as np
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
:param format: the image format use to store the image, can be 'PNG' , 'JPG' ...
:return: bytes
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
def save(self, file_path: str) -> None:
"""
Save image tensor to an image file.
:param file_path: path to an image file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
comp_backend = self.get_comp_backend()
np_img = comp_backend.to_numpy(self).astype(np.uint8)
pil_img = PILImage.fromarray(np_img)
pil_img.save(file_path)
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import os
import shutil
import subprocess
import numpy as np
import PIL.Image as Image
import pytest
from jina import Document, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(uri=os.path.join(cur_dir, '..', 'test_data', 'test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
yield doc
@pytest.mark.parametrize(
'model_name', ['R50x1', 'R101x1', 'R50x3', 'R101x3'] # , 'R152x4']
)
@pytest.mark.parametrize('dataset', ['Imagenet1k', 'Imagenet21k'])
def test_all_models(model_name: str, dataset: str):
shutil.rmtree('pretrained', ignore_errors=True)
os.environ['TRANSFER_MODEL_NAME'] = f'{dataset}/{model_name}'
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as flow:
data = flow.post(
on='/index',
inputs=data_generator(100),
request_size=10,
return_results=True,
)
docs = data[0].docs
for doc in docs:
assert doc.embedding is not None
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
import os
import shutil
import pytest
import PIL.Image as Image
import numpy as np
from jina import Flow, Document
cur_dir = os.path.dirname(os.path.abspath(__file__))
from ...big_transfer import BigTransferEncoder
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
uri=os.path.join(cur_dir, '..', 'test_data', 'test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
yield doc
@pytest.mark.parametrize(
'model_name', ['R50x1', 'R101x1', 'R50x3', 'R101x3'] #, 'R152x4']
)
@pytest.mark.parametrize(
'dataset', ['Imagenet1k', 'Imagenet21k']
)
def test_all_models(model_name: str, dataset: str):
shutil.rmtree('pretrained', ignore_errors=True)
os.environ['TRANSFER_MODEL_NAME'] = f'{dataset}/{model_name}'
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as flow:
data = flow.post(on='/index', inputs=data_generator(100),
request_size=10, return_results=True)
docs = data[0].docs
for doc in docs:
assert doc.embedding is not None
|
_base_ = './grid-rcnn_x101-32x4d_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
"""ReAct output parser."""
import re
from typing import Tuple
from llama_index.core.agent.react.types import (
ActionReasoningStep,
BaseReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.output_parsers.utils import extract_json_str
from llama_index.core.types import BaseOutputParser
def extract_tool_use(input_text: str) -> Tuple[str, str, str]:
pattern = (
r"\s*Thought: (.*?)\n+Action: ([^\n\(\) ]+).*?\n+Action Input: .*?(\{.*\})"
)
match = re.search(pattern, input_text, re.DOTALL)
if not match:
raise ValueError(f"Could not extract tool use from input text: {input_text}")
thought = match.group(1).strip()
action = match.group(2).strip()
action_input = match.group(3).strip()
return thought, action, action_input
def action_input_parser(json_str: str) -> dict:
processed_string = re.sub(r"(?<!\w)\'|\'(?!\w)", '"', json_str)
pattern = r'"(\w+)":\s*"([^"]*)"'
matches = re.findall(pattern, processed_string)
return dict(matches)
def extract_final_response(input_text: str) -> Tuple[str, str]:
pattern = r"\s*Thought:(.*?)Answer:(.*?)(?:$)"
match = re.search(pattern, input_text, re.DOTALL)
if not match:
raise ValueError(
f"Could not extract final answer from input text: {input_text}"
)
thought = match.group(1).strip()
answer = match.group(2).strip()
return thought, answer
def parse_action_reasoning_step(output: str) -> ActionReasoningStep:
"""
Parse an action reasoning step from the LLM output.
"""
# Weaker LLMs may generate ReActAgent steps whose Action Input are horrible JSON strings.
# `dirtyjson` is more lenient than `json` in parsing JSON strings.
import dirtyjson as json
thought, action, action_input = extract_tool_use(output)
json_str = extract_json_str(action_input)
# First we try json, if this fails we use ast
try:
action_input_dict = json.loads(json_str)
except Exception:
action_input_dict = action_input_parser(json_str)
return ActionReasoningStep(
thought=thought, action=action, action_input=action_input_dict
)
class ReActOutputParser(BaseOutputParser):
"""ReAct Output parser."""
def parse(self, output: str, is_streaming: bool = False) -> BaseReasoningStep:
"""
Parse output from ReAct agent.
We expect the output to be in one of the following formats:
1. If the agent need to use a tool to answer the question:
```
Thought: <thought>
Action: <action>
Action Input: <action_input>
```
2. If the agent can answer the question without any tools:
```
Thought: <thought>
Answer: <answer>
```
"""
if "Thought:" not in output:
# NOTE: handle the case where the agent directly outputs the answer
# instead of following the thought-answer format
return ResponseReasoningStep(
thought="(Implicit) I can answer without any more tools!",
response=output,
is_streaming=is_streaming,
)
# An "Action" should take priority over an "Answer"
if "Action:" in output:
return parse_action_reasoning_step(output)
if "Answer:" in output:
thought, answer = extract_final_response(output)
return ResponseReasoningStep(
thought=thought, response=answer, is_streaming=is_streaming
)
raise ValueError(f"Could not parse output: {output}")
def format(self, output: str) -> str:
"""Format a query with structured output formatting instructions."""
raise NotImplementedError
|
"""ReAct output parser."""
import re
from typing import Tuple
from llama_index.core.agent.react.types import (
ActionReasoningStep,
BaseReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.output_parsers.utils import extract_json_str
from llama_index.core.types import BaseOutputParser
def extract_tool_use(input_text: str) -> Tuple[str, str, str]:
pattern = (
r"\s*Thought: (.*?)\n+Action: ([^\n\(\) ]+).*?\n+Action Input: .*?(\{.*\})"
)
match = re.search(pattern, input_text, re.DOTALL)
if not match:
raise ValueError(f"Could not extract tool use from input text: {input_text}")
thought = match.group(1).strip()
action = match.group(2).strip()
action_input = match.group(3).strip()
return thought, action, action_input
def action_input_parser(json_str: str) -> dict:
processed_string = re.sub(r"(?<!\w)\'|\'(?!\w)", '"', json_str)
pattern = r'"(\w+)":\s*"([^"]*)"'
matches = re.findall(pattern, processed_string)
return dict(matches)
def extract_final_response(input_text: str) -> Tuple[str, str]:
pattern = r"\s*Thought:(.*?)Answer:(.*?)(?:$)"
match = re.search(pattern, input_text, re.DOTALL)
if not match:
raise ValueError(
f"Could not extract final answer from input text: {input_text}"
)
thought = match.group(1).strip()
answer = match.group(2).strip()
return thought, answer
def parse_action_reasoning_step(output: str) -> ActionReasoningStep:
"""
Parse an action reasoning step from the LLM output.
"""
# Weaker LLMs may generate ReActAgent steps whose Action Input are horrible JSON strings.
# `dirtyjson` is more lenient than `json` in parsing JSON strings.
import dirtyjson as json
thought, action, action_input = extract_tool_use(output)
json_str = extract_json_str(action_input)
# First we try json, if this fails we use ast
try:
action_input_dict = json.loads(json_str)
except Exception:
action_input_dict = action_input_parser(json_str)
return ActionReasoningStep(
thought=thought, action=action, action_input=action_input_dict
)
class ReActOutputParser(BaseOutputParser):
"""ReAct Output parser."""
def parse(self, output: str, is_streaming: bool = False) -> BaseReasoningStep:
"""
Parse output from ReAct agent.
We expect the output to be in one of the following formats:
1. If the agent need to use a tool to answer the question:
```
Thought: <thought>
Action: <action>
Action Input: <action_input>
```
2. If the agent can answer the question without any tools:
```
Thought: <thought>
Answer: <answer>
```
"""
if "Thought:" not in output:
# NOTE: handle the case where the agent directly outputs the answer
# instead of following the thought-answer format
return ResponseReasoningStep(
thought="(Implicit) I can answer without any more tools!",
response=output,
is_streaming=is_streaming,
)
# An "Action" should take priority over an "Answer"
if "Action:" in output:
return parse_action_reasoning_step(output)
if "Answer:" in output:
thought, answer = extract_final_response(output)
return ResponseReasoningStep(
thought=thought, response=answer, is_streaming=is_streaming
)
raise ValueError(f"Could not parse output: {output}")
def format(self, output: str) -> str:
"""Format a query with structured output formatting instructions."""
raise NotImplementedError
|
"""Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
PYDANTIC_MAJOR_VERSION,
PydanticBaseModel,
TBaseModel,
)
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
pydantic_object: Annotated[type[TBaseModel], SkipValidation()] # type: ignore
"""The pydantic model to parse."""
def _parse_obj(self, obj: dict) -> TBaseModel:
if PYDANTIC_MAJOR_VERSION == 2:
try:
if issubclass(self.pydantic_object, pydantic.BaseModel):
return self.pydantic_object.model_validate(obj)
elif issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
else:
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
else: # pydantic v1
try:
return self.pydantic_object.parse_obj(obj)
except pydantic.ValidationError as e:
raise self._parser_exception(e, obj) from e
def _parser_exception(
self, e: Exception, json_object: dict
) -> OutputParserException:
json_string = json.dumps(json_object)
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_string}. Got: {e}"
return OutputParserException(msg, llm_output=json_string)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Optional[TBaseModel]:
"""Parse the result of an LLM call to a pydantic object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to False.
Returns:
The parsed pydantic object.
"""
try:
json_object = super().parse_result(result)
return self._parse_obj(json_object)
except OutputParserException:
if partial:
return None
raise
def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
Args:
text: The output of the LLM call.
Returns:
The parsed pydantic object.
"""
return super().parse(text)
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.model_json_schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
return self.pydantic_object
PydanticOutputParser.model_rebuild()
_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
# Re-exporting types for backwards compatibility
__all__ = [
"PydanticBaseModel",
"PydanticOutputParser",
"TBaseModel",
]
|
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
PYDANTIC_MAJOR_VERSION,
PydanticBaseModel,
TBaseModel,
)
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
pydantic_object: Annotated[type[TBaseModel], SkipValidation()] # type: ignore
"""The pydantic model to parse."""
def _parse_obj(self, obj: dict) -> TBaseModel:
if PYDANTIC_MAJOR_VERSION == 2:
try:
if issubclass(self.pydantic_object, pydantic.BaseModel):
return self.pydantic_object.model_validate(obj)
elif issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
else:
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
else: # pydantic v1
try:
return self.pydantic_object.parse_obj(obj)
except pydantic.ValidationError as e:
raise self._parser_exception(e, obj) from e
def _parser_exception(
self, e: Exception, json_object: dict
) -> OutputParserException:
json_string = json.dumps(json_object)
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_string}. Got: {e}"
return OutputParserException(msg, llm_output=json_string)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Optional[TBaseModel]:
"""Parse the result of an LLM call to a pydantic object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to False.
Returns:
The parsed pydantic object.
"""
try:
json_object = super().parse_result(result)
return self._parse_obj(json_object)
except OutputParserException:
if partial:
return None
raise
def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
Args:
text: The output of the LLM call.
Returns:
The parsed pydantic object.
"""
return super().parse(text)
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.model_json_schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
return self.pydantic_object
PydanticOutputParser.model_rebuild()
_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
# Re-exporting types for backwards compatibility
__all__ = [
"PydanticBaseModel",
"PydanticOutputParser",
"TBaseModel",
]
|
_base_ = ['faster-rcnn_r50_fpn_32xb2-1x_openimages.py']
model = dict(
roi_head=dict(bbox_head=dict(num_classes=500)),
test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-train-metas.pkl'))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
test_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
model = dict(
roi_head=dict(bbox_head=dict(num_classes=500)),
test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-train-metas.pkl'))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
test_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
import sys
from typing import Any, Optional
from unittest.mock import MagicMock, patch
from langchain_community.embeddings import GPT4AllEmbeddings
_GPT4ALL_MODEL_NAME = "all-MiniLM-L6-v2.gguf2.f16.gguf"
_GPT4ALL_NTHREADS = 4
_GPT4ALL_DEVICE = "gpu"
_GPT4ALL_KWARGS = {"allow_download": False}
class MockEmbed4All(MagicMock):
"""Mock Embed4All class."""
def __init__(
self,
model_name: Optional[str] = None,
*,
n_threads: Optional[int] = None,
device: Optional[str] = None,
**kwargs: Any,
):
assert model_name == _GPT4ALL_MODEL_NAME
class MockGpt4AllPackage(MagicMock):
"""Mock gpt4all package."""
Embed4All = MockEmbed4All
def test_create_gpt4all_embeddings_no_kwargs() -> None:
"""Test fix for #25119"""
with patch.dict(sys.modules, {"gpt4all": MockGpt4AllPackage()}):
embedding = GPT4AllEmbeddings( # type: ignore[call-arg]
model_name=_GPT4ALL_MODEL_NAME,
n_threads=_GPT4ALL_NTHREADS,
device=_GPT4ALL_DEVICE,
)
assert embedding.model_name == _GPT4ALL_MODEL_NAME
assert embedding.n_threads == _GPT4ALL_NTHREADS
assert embedding.device == _GPT4ALL_DEVICE
assert embedding.gpt4all_kwargs == {}
assert isinstance(embedding.client, MockEmbed4All)
def test_create_gpt4all_embeddings_with_kwargs() -> None:
with patch.dict(sys.modules, {"gpt4all": MockGpt4AllPackage()}):
embedding = GPT4AllEmbeddings( # type: ignore[call-arg]
model_name=_GPT4ALL_MODEL_NAME,
n_threads=_GPT4ALL_NTHREADS,
device=_GPT4ALL_DEVICE,
gpt4all_kwargs=_GPT4ALL_KWARGS,
)
assert embedding.model_name == _GPT4ALL_MODEL_NAME
assert embedding.n_threads == _GPT4ALL_NTHREADS
assert embedding.device == _GPT4ALL_DEVICE
assert embedding.gpt4all_kwargs == _GPT4ALL_KWARGS
assert isinstance(embedding.client, MockEmbed4All)
|
import sys
from typing import Any, Optional
from unittest.mock import MagicMock, patch
from langchain_community.embeddings import GPT4AllEmbeddings
_GPT4ALL_MODEL_NAME = "all-MiniLM-L6-v2.gguf2.f16.gguf"
_GPT4ALL_NTHREADS = 4
_GPT4ALL_DEVICE = "gpu"
_GPT4ALL_KWARGS = {"allow_download": False}
class MockEmbed4All(MagicMock):
"""Mock Embed4All class."""
def __init__(
self,
model_name: Optional[str] = None,
*,
n_threads: Optional[int] = None,
device: Optional[str] = None,
**kwargs: Any,
): # type: ignore[no-untyped-def]
assert model_name == _GPT4ALL_MODEL_NAME
class MockGpt4AllPackage(MagicMock):
"""Mock gpt4all package."""
Embed4All = MockEmbed4All
def test_create_gpt4all_embeddings_no_kwargs() -> None:
"""Test fix for #25119"""
with patch.dict(sys.modules, {"gpt4all": MockGpt4AllPackage()}):
embedding = GPT4AllEmbeddings( # type: ignore[call-arg]
model_name=_GPT4ALL_MODEL_NAME,
n_threads=_GPT4ALL_NTHREADS,
device=_GPT4ALL_DEVICE,
)
assert embedding.model_name == _GPT4ALL_MODEL_NAME
assert embedding.n_threads == _GPT4ALL_NTHREADS
assert embedding.device == _GPT4ALL_DEVICE
assert embedding.gpt4all_kwargs == {}
assert isinstance(embedding.client, MockEmbed4All)
def test_create_gpt4all_embeddings_with_kwargs() -> None:
with patch.dict(sys.modules, {"gpt4all": MockGpt4AllPackage()}):
embedding = GPT4AllEmbeddings( # type: ignore[call-arg]
model_name=_GPT4ALL_MODEL_NAME,
n_threads=_GPT4ALL_NTHREADS,
device=_GPT4ALL_DEVICE,
gpt4all_kwargs=_GPT4ALL_KWARGS,
)
assert embedding.model_name == _GPT4ALL_MODEL_NAME
assert embedding.n_threads == _GPT4ALL_NTHREADS
assert embedding.device == _GPT4ALL_DEVICE
assert embedding.gpt4all_kwargs == _GPT4ALL_KWARGS
assert isinstance(embedding.client, MockEmbed4All)
|
"""Init file of LlamaIndex."""
__version__ = "0.12.32"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.31"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.utils.misc import get_box_tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes, bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
right) and decode it back to the original.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True, **kwargs):
super().__init__(**kwargs)
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format
is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
eps (float): a small value to ensure target < max_dis, instead <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
gt_bboxes = get_box_tensor(gt_bboxes)
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or
(B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
bboxes = distance2bbox(points, pred_bboxes, max_shape)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes)
return bboxes
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import bbox2distance, distance2bbox
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
right) and decode it back to the original.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True):
super(BaseBBoxCoder, self).__init__()
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
eps (float): a small value to ensure target < max_dis, instead <=.
Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Tensor: Boxes with shape (N, 4) or (B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
return distance2bbox(points, pred_bboxes, max_shape)
|
_base_ = './tood_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './tood_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
except IndexError:
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
def embed_inputs(
self,
model: SentenceTransformer,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]:
"""
Call the encodder methode of the model pass
Args:
model (SentenceTransformer): Model we are evaluating
sentences (str | list[str] | np.ndarray): Text that we are embedding
Returns:
list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: The associate Embedding
"""
return model.encode(sentences, **kwargs)
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
except IndexError:
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
|
from typing import Any, Dict, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. v2betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = tv_tensors.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value]
class ClampBoundingBoxes(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
.. v2betastatus:: ClampBoundingBoxes transform
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
|
from typing import Any, Dict, Union
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. v2betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or datapoints.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (datapoints.BoundingBoxes,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBoxes, params: Dict[str, Any]) -> datapoints.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value]
class ClampBoundingBoxes(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
.. v2betastatus:: ClampBoundingBoxes transform
"""
_transformed_types = (datapoints.BoundingBoxes,)
def _transform(self, inpt: datapoints.BoundingBoxes, params: Dict[str, Any]) -> datapoints.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
|
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .RerankingEvaluator import RerankingEvaluator
from .SentenceEvaluator import SentenceEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .SimilarityFunction import SimilarityFunction
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
__all__ = [
"SentenceEvaluator",
"SimilarityFunction",
"BinaryClassificationEvaluator",
"EmbeddingSimilarityEvaluator",
"InformationRetrievalEvaluator",
"LabelAccuracyEvaluator",
"MSEEvaluator",
"MSEEvaluatorFromDataFrame",
"ParaphraseMiningEvaluator",
"SequentialEvaluator",
"TranslationEvaluator",
"TripletEvaluator",
"RerankingEvaluator",
]
|
from .SentenceEvaluator import SentenceEvaluator
from .SimilarityFunction import SimilarityFunction
from .BinaryClassificationEvaluator import BinaryClassificationEvaluator
from .EmbeddingSimilarityEvaluator import EmbeddingSimilarityEvaluator
from .InformationRetrievalEvaluator import InformationRetrievalEvaluator
from .LabelAccuracyEvaluator import LabelAccuracyEvaluator
from .MSEEvaluator import MSEEvaluator
from .MSEEvaluatorFromDataFrame import MSEEvaluatorFromDataFrame
from .ParaphraseMiningEvaluator import ParaphraseMiningEvaluator
from .SequentialEvaluator import SequentialEvaluator
from .TranslationEvaluator import TranslationEvaluator
from .TripletEvaluator import TripletEvaluator
from .RerankingEvaluator import RerankingEvaluator
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseAnglELoss import SparseAnglELoss
from sentence_transformers.sparse_encoder.losses.SparseCachedGISTEmbedLoss import (
SparseCachedGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedMultipleNegativesRankingLoss import (
SparseCachedMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import (
SparseCoSENTLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCosineSimilarityLoss import (
SparseCosineSimilarityLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseDistillKLDivLoss import (
SparseDistillKLDivLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseGISTEmbedLoss import (
SparseGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMarginMSELoss import (
SparseMarginMSELoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMSELoss import SparseMSELoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseTripletLoss import (
SparseTripletLoss,
)
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.losses.CSRLoss import CSRLoss
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import (
CSRReconstructionLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseAnglELoss import SparseAnglELoss
from sentence_transformers.sparse_encoder.losses.SparseCachedGISTEmbedLoss import (
SparseCachedGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCachedMultipleNegativesRankingLoss import (
SparseCachedMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import (
SparseCoSENTLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseCosineSimilarityLoss import (
SparseCosineSimilarityLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseGISTEmbedLoss import (
SparseGISTEmbedLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMarginMSELoss import (
SparseMarginMSELoss,
)
from sentence_transformers.sparse_encoder.losses.SparseMSELoss import SparseMSELoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.losses.SparseTripletLoss import (
SparseTripletLoss,
)
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
neg_pos_up (int, optional): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool, optional): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery, num):
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
|
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
"""Random sampler.
Args:
num (int): Number of samples
pos_fraction (float): Fraction of positive samples
neg_pos_up (int, optional): Upper bound number of negative and
positive samples. Defaults to -1.
add_gt_as_proposals (bool, optional): Whether to add ground truth
boxes as proposals. Defaults to True.
"""
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
from mmdet.core.bbox import demodata
super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.rng = demodata.ensure_rng(kwargs.get('rng', None))
def random_choice(self, gallery, num):
"""Random select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
If `gallery` is a ndarray or list, the returned indices will be a
ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
# This is a temporary fix. We can revert the following code
# when PyTorch fixes the abnormal return of torch.randperm.
# See: https://github.com/open-mmlab/mmdetection/pull/5014
perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self, assign_result, num_expected, **kwargs):
"""Randomly sample some negative samples."""
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
return self.random_choice(neg_inds, num_expected)
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is responsible for cleaning the model section of the table of content by removing duplicates and sorting
the entries in alphabetical order.
Usage (from the root of the repo):
Check that the table of content is properly sorted (used in `make quality`):
```bash
python utils/check_doc_toc.py
```
Auto-sort the table of content if it is not properly sorted (used in `make style`):
```bash
python utils/check_doc_toc.py --fix_and_overwrite
```
"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc: list[dict]) -> list[dict]:
"""
Cleans a section of the table of content of the model documentation (one specific modality) by removing duplicates
and sorting models alphabetically.
Args:
model_doc (`List[dict]`):
The list of dictionaries extracted from the `_toctree.yml` file for this specific modality.
Returns:
`List[dict]`: List of dictionaries like the input, but cleaned up and sorted.
"""
counts = defaultdict(int)
for doc in model_doc:
counts[doc["local"]] += 1
duplicates = [key for key, value in counts.items() if value > 1]
new_doc = []
for duplicate_key in duplicates:
titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
if len(titles) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others."
)
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]})
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
# Sort
return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite: bool = False):
"""
Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model
API doc) and potentially auto-cleans it.
Args:
overwrite (`bool`, *optional*, defaults to `False`):
Whether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`).
"""
with open(PATH_TO_TOC, encoding="utf-8") as f:
content = yaml.safe_load(f.read())
# Get to the API doc
api_idx = 0
while content[api_idx]["title"] != "API":
api_idx += 1
api_doc = content[api_idx]["sections"]
# Then to the model doc
model_idx = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
model_doc = api_doc[model_idx]["sections"]
# Extract the modalities and clean them one by one.
modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
diff = False
for idx, modality_doc in modalities_docs:
old_modality_doc = modality_doc["sections"]
new_modality_doc = clean_model_doc_toc(old_modality_doc)
if old_modality_doc != new_modality_doc:
diff = True
if overwrite:
model_doc[idx]["sections"] = new_modality_doc
if diff:
if overwrite:
api_doc[model_idx]["sections"] = model_doc
content[api_idx]["sections"] = api_doc
with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
f.write(yaml.dump(content, allow_unicode=True))
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
|
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is responsible for cleaning the model section of the table of content by removing duplicates and sorting
the entries in alphabetical order.
Usage (from the root of the repo):
Check that the table of content is properly sorted (used in `make quality`):
```bash
python utils/check_doc_toc.py
```
Auto-sort the table of content if it is not properly sorted (used in `make style`):
```bash
python utils/check_doc_toc.py --fix_and_overwrite
```
"""
import argparse
from collections import defaultdict
from typing import List
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc: List[dict]) -> List[dict]:
"""
Cleans a section of the table of content of the model documentation (one specific modality) by removing duplicates
and sorting models alphabetically.
Args:
model_doc (`List[dict]`):
The list of dictionaries extracted from the `_toctree.yml` file for this specific modality.
Returns:
`List[dict]`: List of dictionaries like the input, but cleaned up and sorted.
"""
counts = defaultdict(int)
for doc in model_doc:
counts[doc["local"]] += 1
duplicates = [key for key, value in counts.items() if value > 1]
new_doc = []
for duplicate_key in duplicates:
titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
if len(titles) > 1:
raise ValueError(
f"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others."
)
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]})
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
# Sort
return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite: bool = False):
"""
Check that the content of the table of content in `_toctree.yml` is clean (no duplicates and sorted for the model
API doc) and potentially auto-cleans it.
Args:
overwrite (`bool`, *optional*, defaults to `False`):
Whether to just check if the TOC is clean or to auto-clean it (when `overwrite=True`).
"""
with open(PATH_TO_TOC, encoding="utf-8") as f:
content = yaml.safe_load(f.read())
# Get to the API doc
api_idx = 0
while content[api_idx]["title"] != "API":
api_idx += 1
api_doc = content[api_idx]["sections"]
# Then to the model doc
model_idx = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
model_doc = api_doc[model_idx]["sections"]
# Extract the modalities and clean them one by one.
modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
diff = False
for idx, modality_doc in modalities_docs:
old_modality_doc = modality_doc["sections"]
new_modality_doc = clean_model_doc_toc(old_modality_doc)
if old_modality_doc != new_modality_doc:
diff = True
if overwrite:
model_doc[idx]["sections"] = new_modality_doc
if diff:
if overwrite:
api_doc[model_idx]["sections"] = model_doc
content[api_idx]["sections"] = api_doc
with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
f.write(yaml.dump(content, allow_unicode=True))
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
dataset = self.builder.as_dataset(
split=self.split, ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""[BETA] Convert all datapoints to pure tensors, removing associated metadata (if any).
.. v2betastatus:: ToPureTensor transform
This doesn't scale or change the values, only the type.
"""
_transformed_types = (datapoints.Datapoint,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""[BETA] Convert all datapoints to pure tensors, removing associated metadata (if any).
.. v2betastatus:: ToPureTensor transform
This doesn't scale or change the values, only the type.
"""
_transformed_types = (datapoints.Datapoint,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
|
"""Test self-hosted embeddings."""
from typing import Any
from langchain_community.embeddings import (
SelfHostedEmbeddings,
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
def get_remote_instance() -> Any:
"""Get remote instance for testing."""
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
gpu.install_packages(["pip:./"])
return gpu
def test_self_hosted_huggingface_embedding_documents() -> None:
"""Test self-hosted huggingface embeddings."""
documents = ["foo bar"]
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_self_hosted_huggingface_embedding_query() -> None:
"""Test self-hosted huggingface embeddings."""
document = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_query(document)
assert len(output) == 768
def test_self_hosted_huggingface_instructor_embedding_documents() -> None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ["foo bar"]
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_self_hosted_huggingface_instructor_embedding_query() -> None:
"""Test self-hosted huggingface instruct embeddings."""
query = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_query(query)
assert len(output) == 768
def get_pipeline() -> Any:
"""Get pipeline for testing."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "facebook/bart-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
def inference_fn(pipeline: Any, prompt: str) -> Any:
"""Inference function for testing."""
# Return last hidden state of the model
if isinstance(prompt, list):
return [emb[0][-1] for emb in pipeline(prompt)]
return pipeline(prompt)[0][-1]
def test_self_hosted_embedding_documents() -> None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ["foo bar"] * 2
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings(
model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 50265
def test_self_hosted_embedding_query() -> None:
"""Test self-hosted custom embeddings."""
query = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings(
model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
)
output = embedding.embed_query(query)
assert len(output) == 50265
|
"""Test self-hosted embeddings."""
from typing import Any
from langchain_community.embeddings import (
SelfHostedEmbeddings,
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
def get_remote_instance() -> Any:
"""Get remote instance for testing."""
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
gpu.install_packages(["pip:./"])
return gpu
def test_self_hosted_huggingface_embedding_documents() -> None:
"""Test self-hosted huggingface embeddings."""
documents = ["foo bar"]
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_self_hosted_huggingface_embedding_query() -> None:
"""Test self-hosted huggingface embeddings."""
document = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceEmbeddings(hardware=gpu)
output = embedding.embed_query(document)
assert len(output) == 768
def test_self_hosted_huggingface_instructor_embedding_documents() -> None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ["foo bar"]
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
def test_self_hosted_huggingface_instructor_embedding_query() -> None:
"""Test self-hosted huggingface instruct embeddings."""
query = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
output = embedding.embed_query(query)
assert len(output) == 768
def get_pipeline() -> Any:
"""Get pipeline for testing."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "facebook/bart-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
def inference_fn(pipeline: Any, prompt: str) -> Any:
"""Inference function for testing."""
# Return last hidden state of the model
if isinstance(prompt, list):
return [emb[0][-1] for emb in pipeline(prompt)]
return pipeline(prompt)[0][-1]
def test_self_hosted_embedding_documents() -> None:
"""Test self-hosted huggingface instruct embeddings."""
documents = ["foo bar"] * 2
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings( # type: ignore[call-arg]
model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 50265
def test_self_hosted_embedding_query() -> None:
"""Test self-hosted custom embeddings."""
query = "foo bar"
gpu = get_remote_instance()
embedding = SelfHostedEmbeddings( # type: ignore[call-arg]
model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
)
output = embedding.embed_query(query)
assert len(output) == 50265
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocArrayStacked
from docarray.array.abstract_array import AnyDocArray
class DocArraySummary:
def __init__(self, da: 'AnyDocArray'):
self.da = da
def summary(self) -> None:
"""
Print a summary of this DocArray object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocArrayStacked
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.da.__class__.__name__)
table.add_row('Length', str(len(self.da)), end_section=True)
if isinstance(self.da, DocArrayStacked):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(da=self.da)
for field_name in stacked_fields:
val = self.da
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocArray Summary', expand=False))
self.da.document_type.schema_summary()
@staticmethod
def _get_stacked_fields(da: 'DocArrayStacked') -> List[str]: # TODO this might
# broken
"""
Return a list of the field names of a DocArrayStacked instance that are
stacked, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in da._storage.tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in da._storage.doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocArraySummary._get_stacked_fields(da=value_doc)
]
)
return fields
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocumentArrayStacked
from docarray.array.abstract_array import AnyDocumentArray
class DocumentArraySummary:
def __init__(self, da: 'AnyDocumentArray'):
self.da = da
def summary(self) -> None:
"""
Print a summary of this DocumentArray object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocumentArrayStacked
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.da.__class__.__name__)
table.add_row('Length', str(len(self.da)), end_section=True)
if isinstance(self.da, DocumentArrayStacked):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(da=self.da)
for field_name in stacked_fields:
val = self.da
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocumentArray Summary', expand=False))
self.da.document_type.schema_summary()
@staticmethod
def _get_stacked_fields(da: 'DocumentArrayStacked') -> List[str]: # TODO this might
# broken
"""
Return a list of the field names of a DocumentArrayStacked instance that are
stacked, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in da._storage.tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in da._storage.doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocumentArraySummary._get_stacked_fields(da=value_doc)
]
)
return fields
|
"""
This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/2d_matryoshka_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, inner_train_loss, [768, 512, 256, 128, 64])
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="2d-matryoshka-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
"""
This examples trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/2d_matryoshka_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, inner_train_loss, [768, 512, 256, 128, 64])
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="2d-matryoshka-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
# Copyright 2025 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issue. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone
from github import Github
LABELS_TO_EXEMPT = [
"close-to-merge",
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/diffusers")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
labels = [label.name.lower() for label in issue.get_labels()]
if "stale" in labels:
comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
if last_comment is not None and last_comment.user.login != "github-actions[bot]":
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.now(timezone.utc) - issue.updated_at).days > 23
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label in LABELS_TO_EXEMPT for label in labels)
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored."
)
issue.add_to_labels("stale")
if __name__ == "__main__":
main()
|
# Copyright 2024 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issue. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""
import os
from datetime import datetime as dt
from datetime import timezone
from github import Github
LABELS_TO_EXEMPT = [
"close-to-merge",
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("huggingface/diffusers")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
labels = [label.name.lower() for label in issue.get_labels()]
if "stale" in labels:
comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
last_comment = comments[0] if len(comments) > 0 else None
if last_comment is not None and last_comment.user.login != "github-actions[bot]":
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open")
issue.remove_from_labels("stale")
elif (
(dt.now(timezone.utc) - issue.updated_at).days > 23
and (dt.now(timezone.utc) - issue.created_at).days >= 30
and not any(label in LABELS_TO_EXEMPT for label in labels)
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored."
)
issue.add_to_labels("stale")
if __name__ == "__main__":
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DDOD(SingleStageDetector):
"""Implementation of `DDOD <https://arxiv.org/pdf/2107.02963.pdf>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class DDOD(SingleStageDetector):
"""Implementation of `DDOD <https://arxiv.org/pdf/2107.02963.pdf>`_.
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
clip_grad=dict(max_norm=35, norm_type=2))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 64,
by_epoch=False,
begin=0,
end=26000),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))
# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
"""
Computes embeddings
"""
from __future__ import annotations
import numpy as np
from sentence_transformers import SentenceTransformer
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
|
"""
Computes embeddings
"""
import numpy as np
from sentence_transformers import SentenceTransformer
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.3',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.0',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.0.1',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'elasticsearch>=8.0.1',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'qdrant-client~=0.7.3',
'elasticsearch>=8.0.1',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.0',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.0.1',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.0',
'elasticsearch>=8.0.1',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exposed tensorflow versions."""
from tensorflow.python.framework import versions
from tensorflow.python.platform import test
class VersionTest(test.TestCase):
def testVersion(self):
self.assertEqual(type(versions.__version__), str)
self.assertEqual(type(versions.VERSION), str)
# This pattern will need to grow as we include alpha, builds, etc.
self.assertRegex(
versions.__version__, r'^\d+\.\d+\.(\d+(\-\w+)?(\+\w+)?|head)$'
)
self.assertRegex(
versions.VERSION, r'^\d+\.\d+\.(\d+(\-\w+)?(\+\w+)?|head)$'
)
def testGraphDefVersion(self):
version = versions.GRAPH_DEF_VERSION
min_consumer = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
min_producer = versions.GRAPH_DEF_VERSION_MIN_PRODUCER
for v in version, min_consumer, min_producer:
self.assertEqual(type(v), int)
self.assertLessEqual(0, min_consumer)
self.assertLessEqual(0, min_producer)
self.assertLessEqual(min_producer, version)
def testGitAndCompilerVersion(self):
self.assertEqual(type(versions.__git_version__), str)
self.assertEqual(type(versions.__compiler_version__), str)
self.assertEqual(type(versions.GIT_VERSION), str)
self.assertEqual(type(versions.COMPILER_VERSION), str)
if __name__ == '__main__':
test.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exposed tensorflow versions."""
from tensorflow.python.framework import versions
from tensorflow.python.platform import test
class VersionTest(test.TestCase):
def testVersion(self):
self.assertEqual(type(versions.__version__), str)
self.assertEqual(type(versions.VERSION), str)
# This pattern will need to grow as we include alpha, builds, etc.
self.assertRegex(versions.__version__, r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
self.assertRegex(versions.VERSION, r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
def testGraphDefVersion(self):
version = versions.GRAPH_DEF_VERSION
min_consumer = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
min_producer = versions.GRAPH_DEF_VERSION_MIN_PRODUCER
for v in version, min_consumer, min_producer:
self.assertEqual(type(v), int)
self.assertLessEqual(0, min_consumer)
self.assertLessEqual(0, min_producer)
self.assertLessEqual(min_producer, version)
def testGitAndCompilerVersion(self):
self.assertEqual(type(versions.__git_version__), str)
self.assertEqual(type(versions.__compiler_version__), str)
self.assertEqual(type(versions.GIT_VERSION), str)
self.assertEqual(type(versions.COMPILER_VERSION), str)
if __name__ == '__main__':
test.main()
|
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
input_size=input_size,
strides=[8, 16, 32, 64, 128, 256, 512],
basesize_ratio_range=(0.15, 0.9),
ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=3,
dataset=dict( # RepeatDataset
# the dataset is repeated 10 times, and the training schedule is 2x,
# so the actual epoch = 12 * 10 = 120.
times=10,
dataset=dict( # ConcatDataset
# VOCDataset will add different `dataset_type` in dataset.metainfo,
# which will get error if using ConcatDataset. Adding
# `ignore_keys` can avoid this error.
ignore_keys=['dataset_type'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
neck=dict(
out_channels=(512, 1024, 512, 256, 256, 256, 256),
level_strides=(2, 2, 2, 2, 1),
level_paddings=(1, 1, 1, 1, 1),
last_kernel_size=4),
bbox_head=dict(
in_channels=(512, 1024, 512, 256, 256, 256, 256),
anchor_generator=dict(
input_size=input_size,
strides=[8, 16, 32, 64, 128, 256, 512],
basesize_ratio_range=(0.15, 0.9),
ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=3,
dataset=dict( # RepeatDataset
# the dataset is repeated 10 times, and the training schedule is 2x,
# so the actual epoch = 12 * 10 = 120.
times=10,
dataset=dict( # ConcatDataset
# VOCDataset will add different `DATASET_TYPE` in dataset.metainfo,
# which will get error if using ConcatDataset. Adding
# `ignore_keys` can avoid this error.
ignore_keys=['DATASET_TYPE'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
], ))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
], ))
# # use ResNeSt img_norm
img_norm_cfg = dict(
mean=[123.68, 116.779, 103.939], std=[58.393, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=False,
poly2mask=False),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
|
from __future__ import annotations
from typing import Any
from langchain_core._api import deprecated
from langchain_core.caches import BaseCache as BaseCache # For model_rebuild
from langchain_core.callbacks import Callbacks as Callbacks # For model_rebuild
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.utils import pre_init
from pydantic import BaseModel
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Refer here for how to incorporate summaries of conversation history: "
"https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/"
),
)
class SummarizerMixin(BaseModel):
"""Mixin for summarizer."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
prompt: BasePromptTemplate = SUMMARY_PROMPT
summary_message_cls: type[BaseMessage] = SystemMessage
def predict_new_summary(
self,
messages: list[BaseMessage],
existing_summary: str,
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return chain.predict(summary=existing_summary, new_lines=new_lines)
async def apredict_new_summary(
self,
messages: list[BaseMessage],
existing_summary: str,
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return await chain.apredict(summary=existing_summary, new_lines=new_lines)
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
"""Continually summarizes the conversation history.
The summary is updated after each conversation turn.
The implementations returns a summary of the conversation history which
can be used to provide context to the model.
"""
buffer: str = ""
memory_key: str = "history" #: :meta private:
@classmethod
def from_messages(
cls,
llm: BaseLanguageModel,
chat_memory: BaseChatMessageHistory,
*,
summarize_step: int = 2,
**kwargs: Any,
) -> ConversationSummaryMemory:
obj = cls(llm=llm, chat_memory=chat_memory, **kwargs)
for i in range(0, len(obj.chat_memory.messages), summarize_step):
obj.buffer = obj.predict_new_summary(
obj.chat_memory.messages[i : i + summarize_step],
obj.buffer,
)
return obj
@property
def memory_variables(self) -> list[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = [self.summary_message_cls(content=self.buffer)]
else:
buffer = self.buffer
return {self.memory_key: buffer}
@pre_init
def validate_prompt_input_variables(cls, values: dict) -> dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
msg = (
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
raise ValueError(msg)
return values
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.buffer = self.predict_new_summary(
self.chat_memory.messages[-2:],
self.buffer,
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.buffer = ""
ConversationSummaryMemory.model_rebuild()
|
from __future__ import annotations
from typing import Any
from langchain_core._api import deprecated
from langchain_core.caches import BaseCache as BaseCache # For model_rebuild
from langchain_core.callbacks import Callbacks as Callbacks # For model_rebuild
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
from langchain_core.prompts import BasePromptTemplate
from langchain_core.utils import pre_init
from pydantic import BaseModel
from langchain.chains.llm import LLMChain
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Refer here for how to incorporate summaries of conversation history: "
"https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/"
),
)
class SummarizerMixin(BaseModel):
"""Mixin for summarizer."""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
prompt: BasePromptTemplate = SUMMARY_PROMPT
summary_message_cls: type[BaseMessage] = SystemMessage
def predict_new_summary(
self, messages: list[BaseMessage], existing_summary: str
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return chain.predict(summary=existing_summary, new_lines=new_lines)
async def apredict_new_summary(
self, messages: list[BaseMessage], existing_summary: str
) -> str:
new_lines = get_buffer_string(
messages,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
chain = LLMChain(llm=self.llm, prompt=self.prompt)
return await chain.apredict(summary=existing_summary, new_lines=new_lines)
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class ConversationSummaryMemory(BaseChatMemory, SummarizerMixin):
"""Continually summarizes the conversation history.
The summary is updated after each conversation turn.
The implementations returns a summary of the conversation history which
can be used to provide context to the model.
"""
buffer: str = ""
memory_key: str = "history" #: :meta private:
@classmethod
def from_messages(
cls,
llm: BaseLanguageModel,
chat_memory: BaseChatMessageHistory,
*,
summarize_step: int = 2,
**kwargs: Any,
) -> ConversationSummaryMemory:
obj = cls(llm=llm, chat_memory=chat_memory, **kwargs)
for i in range(0, len(obj.chat_memory.messages), summarize_step):
obj.buffer = obj.predict_new_summary(
obj.chat_memory.messages[i : i + summarize_step], obj.buffer
)
return obj
@property
def memory_variables(self) -> list[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
buffer: Any = [self.summary_message_cls(content=self.buffer)]
else:
buffer = self.buffer
return {self.memory_key: buffer}
@pre_init
def validate_prompt_input_variables(cls, values: dict) -> dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
msg = (
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
raise ValueError(msg)
return values
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self.buffer = self.predict_new_summary(
self.chat_memory.messages[-2:], self.buffer
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.buffer = ""
ConversationSummaryMemory.model_rebuild()
|
"""Function components."""
from inspect import signature
from typing import Any, Callable, Dict, Optional, Set, Tuple
from typing_extensions import Annotated
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
)
from llama_index.core.bridge.pydantic import (
Field,
PrivateAttr,
ConfigDict,
WithJsonSchema,
)
from llama_index.core.callbacks.base import CallbackManager
AnnotatedCallable = Annotated[
Callable,
WithJsonSchema({"type": "string"}, mode="serialization"),
WithJsonSchema({"type": "string"}, mode="validation"),
]
def get_parameters(fn: Callable) -> Tuple[Set[str], Set[str]]:
"""
Get parameters from function.
Returns:
Tuple[Set[str], Set[str]]: required and optional parameters
"""
# please write function below
params = signature(fn).parameters
required_params = set()
optional_params = set()
for param_name in params:
param_default = params[param_name].default
if param_default is params[param_name].empty:
required_params.add(param_name)
else:
optional_params.add(param_name)
return required_params, optional_params
class FnComponent(QueryComponent):
"""Query component that takes in an arbitrary function."""
model_config = ConfigDict(arbitrary_types_allowed=True)
fn: AnnotatedCallable = Field(..., description="Function to run.")
async_fn: Optional[AnnotatedCallable] = Field(
None, description="Async function to run. If not provided, will run `fn`."
)
output_key: str = Field(
default="output", description="Output key for component output."
)
_req_params: Set[str] = PrivateAttr()
_opt_params: Set[str] = PrivateAttr()
def __init__(
self,
fn: Callable,
async_fn: Optional[Callable] = None,
req_params: Optional[Set[str]] = None,
opt_params: Optional[Set[str]] = None,
output_key: str = "output",
**kwargs: Any,
) -> None:
"""Initialize."""
# determine parameters
super().__init__(fn=fn, async_fn=async_fn, output_key=output_key, **kwargs)
default_req_params, default_opt_params = get_parameters(fn)
if req_params is None:
req_params = default_req_params
if opt_params is None:
opt_params = default_opt_params
self._req_params = req_params
self._opt_params = opt_params
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
# TODO: implement
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# check that all required parameters are present
missing_params = self._req_params - set(input.keys())
if missing_params:
raise ValueError(
f"Missing required parameters: {missing_params}. "
f"Input keys: {input.keys()}"
)
# check that no extra parameters are present
extra_params = set(input.keys()) - self._req_params - self._opt_params
if extra_params:
raise ValueError(
f"Extra parameters: {extra_params}. " f"Input keys: {input.keys()}"
)
return input
def _run_component(self, **kwargs: Any) -> Dict:
"""Run component."""
return {self.output_key: self.fn(**kwargs)}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
if self.async_fn is None:
return self._run_component(**kwargs)
else:
return {self.output_key: await self.async_fn(**kwargs)}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys(
required_keys=self._req_params, optional_keys=self._opt_params
)
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({self.output_key})
# alias
FunctionComponent = FnComponent
|
"""Function components."""
from inspect import signature
from typing import Any, Callable, Dict, Optional, Set, Tuple
from typing_extensions import Annotated
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
)
from llama_index.core.bridge.pydantic import (
Field,
PrivateAttr,
ConfigDict,
WithJsonSchema,
)
from llama_index.core.callbacks.base import CallbackManager
AnnotatedCallable = Annotated[
Callable,
WithJsonSchema({"type": "string"}, mode="serialization"),
WithJsonSchema({"type": "string"}, mode="validation"),
]
def get_parameters(fn: Callable) -> Tuple[Set[str], Set[str]]:
"""Get parameters from function.
Returns:
Tuple[Set[str], Set[str]]: required and optional parameters
"""
# please write function below
params = signature(fn).parameters
required_params = set()
optional_params = set()
for param_name in params:
param_default = params[param_name].default
if param_default is params[param_name].empty:
required_params.add(param_name)
else:
optional_params.add(param_name)
return required_params, optional_params
class FnComponent(QueryComponent):
"""Query component that takes in an arbitrary function."""
model_config = ConfigDict(arbitrary_types_allowed=True)
fn: AnnotatedCallable = Field(..., description="Function to run.")
async_fn: Optional[AnnotatedCallable] = Field(
None, description="Async function to run. If not provided, will run `fn`."
)
output_key: str = Field(
default="output", description="Output key for component output."
)
_req_params: Set[str] = PrivateAttr()
_opt_params: Set[str] = PrivateAttr()
def __init__(
self,
fn: Callable,
async_fn: Optional[Callable] = None,
req_params: Optional[Set[str]] = None,
opt_params: Optional[Set[str]] = None,
output_key: str = "output",
**kwargs: Any,
) -> None:
"""Initialize."""
# determine parameters
super().__init__(fn=fn, async_fn=async_fn, output_key=output_key, **kwargs)
default_req_params, default_opt_params = get_parameters(fn)
if req_params is None:
req_params = default_req_params
if opt_params is None:
opt_params = default_opt_params
self._req_params = req_params
self._opt_params = opt_params
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
# TODO: implement
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# check that all required parameters are present
missing_params = self._req_params - set(input.keys())
if missing_params:
raise ValueError(
f"Missing required parameters: {missing_params}. "
f"Input keys: {input.keys()}"
)
# check that no extra parameters are present
extra_params = set(input.keys()) - self._req_params - self._opt_params
if extra_params:
raise ValueError(
f"Extra parameters: {extra_params}. " f"Input keys: {input.keys()}"
)
return input
def _run_component(self, **kwargs: Any) -> Dict:
"""Run component."""
return {self.output_key: self.fn(**kwargs)}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
if self.async_fn is None:
return self._run_component(**kwargs)
else:
return {self.output_key: await self.async_fn(**kwargs)}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys(
required_keys=self._req_params, optional_keys=self._opt_params
)
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({self.output_key})
# alias
FunctionComponent = FnComponent
|
import os
import asyncio
import cognee
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.skipif(
os.getenv("OPENAI_API_KEY") is None,
reason="OPENAI_API_KEY not available to test Cognee integration",
)
@pytest.mark.asyncio()
async def test_graph_rag_cognee():
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key=os.environ["OPENAI_API_KEY"],
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Add data to cognee
await cogneeRAG.add(documents, "test")
# Process data into a knowledge graph
await cogneeRAG.process_data("test")
# Answer prompt based on knowledge graph
search_results = await cogneeRAG.search("Tell me who are the people mentioned?")
assert len(search_results) > 0, "No search results found"
print("\n\nAnswer based on knowledge graph:\n")
for result in search_results:
print(f"{result}\n")
# Answer prompt based on RAG
search_results = await cogneeRAG.rag_search("Tell me who are the people mentioned?")
assert len(search_results) > 0, "No search results found"
print("\n\nAnswer based on RAG:\n")
for result in search_results:
print(f"{result}\n")
# Search for related nodes
search_results = await cogneeRAG.get_related_nodes("person")
print("\n\nRelated nodes are:\n")
for result in search_results:
print(f"{result}\n")
assert len(search_results) > 0, "No search results found"
# Clean all data from previous runs
await cognee.prune.prune_data()
await cognee.prune.prune_system(metadata=True)
if __name__ == "__main__":
asyncio.run(test_graph_rag_cognee())
|
import os
import asyncio
import cognee
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.skipif(
os.getenv("OPENAI_API_KEY") is None,
reason="OPENAI_API_KEY not available to test Cognee integration",
)
@pytest.mark.asyncio()
async def test_graph_rag_cognee():
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key=os.environ["OPENAI_API_KEY"],
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
db_name="cognee_db",
)
# Add data to cognee
await cogneeRAG.add(documents, "test")
# Process data into a knowledge graph
await cogneeRAG.process_data("test")
# Answer prompt based on knowledge graph
search_results = await cogneeRAG.search("person")
assert len(search_results) > 0, "No search results found"
print("\n\nExtracted sentences are:\n")
for result in search_results:
print(f"{result}\n")
# Search for related nodes
search_results = await cogneeRAG.get_related_nodes("person")
print("\n\nRelated nodes are:\n")
for result in search_results:
print(f"{result}\n")
assert len(search_results) > 0, "No search results found"
# Clean all data from previous runs
await cognee.prune.prune_data()
await cognee.prune.prune_system(metadata=True)
if __name__ == "__main__":
asyncio.run(test_graph_rag_cognee())
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.20.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmengine.dist import get_dist_info
from mmengine.hooks import Hook
from torch import nn
from mmdet.registry import HOOKS
from ..utils.dist_utils import all_reduce_dict
def get_norm_states(module: nn.Module) -> OrderedDict:
"""Get the state_dict of batch norms in the module."""
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for k, v in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_states
@HOOKS.register_module()
class SyncNormHook(Hook):
"""Synchronize Norm states before validation, currently used in YOLOX."""
def before_val_epoch(self, runner):
"""Synchronizing norm."""
module = runner.model
_, world_size = get_dist_info()
if world_size == 1:
return
norm_states = get_norm_states(module)
if len(norm_states) == 0:
return
# TODO: use `all_reduce_dict` in mmengine
norm_states = all_reduce_dict(norm_states, op='mean')
module.load_state_dict(norm_states, strict=False)
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import Hook
from torch import nn
from mmdet.registry import HOOKS
from ..utils.dist_utils import all_reduce_dict
def get_norm_states(module):
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for k, v in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_states
@HOOKS.register_module()
class SyncNormHook(Hook):
"""Synchronize Norm states after training epoch, currently used in YOLOX.
Args:
num_last_epochs (int): The number of latter epochs in the end of the
training to switch to synchronizing norm interval. Default: 15.
interval (int): Synchronizing norm interval. Default: 1.
"""
def __init__(self, num_last_epochs=15, interval=1):
self.interval = interval
self.num_last_epochs = num_last_epochs
def before_train_epoch(self, runner):
epoch = runner.epoch
if (epoch + 1) == runner.max_epochs - self.num_last_epochs:
# Synchronize norm every epoch.
self.interval = 1
def after_train_epoch(self, runner):
"""Synchronizing norm."""
epoch = runner.epoch
module = runner.model
if (epoch + 1) % self.interval == 0:
_, world_size = get_dist_info()
if world_size == 1:
return
norm_states = get_norm_states(module)
if len(norm_states) == 0:
return
norm_states = all_reduce_dict(norm_states, op='mean')
module.load_state_dict(norm_states, strict=False)
|
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
# TODO: find a better way to collect image_level_labels
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances', 'image_level_labels'))
]
train_dataloader = dict(
batch_size=2,
num_workers=0, # workers_per_gpu > 0 may occur out of memory
persistent_workers=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=0,
persistent_workers=False,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/validation-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/validation/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/validation-image-metas.pkl',
image_level_ann_file='annotations/validation-'
'annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='OpenImagesMetric',
iou_thr=0.5,
ioa_thr=0.5,
use_group_of=True,
get_supercategory=True)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, denorm_bbox=True),
dict(type='Resize', img_scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
],
),
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=0, # workers_per_gpu > 0 may occur out of memory
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv',
img_prefix=data_root + 'OpenImages/train/',
label_file=data_root + 'annotations/class-descriptions-boxable.csv',
hierarchy_file=data_root +
'annotations/bbox_labels_600_hierarchy.json',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/validation-annotations-bbox.csv',
img_prefix=data_root + 'OpenImages/validation/',
label_file=data_root + 'annotations/class-descriptions-boxable.csv',
hierarchy_file=data_root +
'annotations/bbox_labels_600_hierarchy.json',
meta_file=data_root + 'annotations/validation-image-metas.pkl',
image_level_ann_file=data_root +
'annotations/validation-annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/validation-annotations-bbox.csv',
img_prefix=data_root + 'OpenImages/validation/',
label_file=data_root + 'annotations/class-descriptions-boxable.csv',
hierarchy_file=data_root +
'annotations/bbox_labels_600_hierarchy.json',
meta_file=data_root + 'annotations/validation-image-metas.pkl',
image_level_ann_file=data_root +
'annotations/validation-annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.runner import load_checkpoint
from .. import build_detector
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (str | dict): Config file path
or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_config,
teacher_ckpt=None,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, str):
teacher_config = mmcv.Config.fromfile(teacher_config)
self.teacher_model = build_detector(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(img)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
def cuda(self, device=None):
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling cuda function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def train(self, mode=True):
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name, value):
"""Set attribute, i.e. self.name = value
This reloading prevent the teacher model from being registered as a
nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
|
import mmcv
import torch
from mmcv.runner import load_checkpoint
from .. import build_detector
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (str | dict): Config file path
or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_config,
teacher_ckpt=None,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, str):
teacher_config = mmcv.Config.fromfile(teacher_config)
self.teacher_model = build_detector(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(img)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
def cuda(self, device=None):
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling cuda function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def train(self, mode=True):
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name, value):
"""Set attribute, i.e. self.name = value
This reloading prevent the teacher model from being registered as a
nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
|
from torchaudio._internal import module_utils as _mod_utils
from . import sox_utils
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(0)
__all__ = [
"download_asset",
"sox_utils",
]
|
from torchaudio._internal import module_utils as _mod_utils
from . import sox_utils
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
__all__ = [
"download_asset",
"sox_utils",
]
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files",
[
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
],
)
def test_from_dir(files, tmp_path_factory):
dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md", "w") as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---")
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md", "w") as f:
f.write("")
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
f.write('{"default": {"dataset_size": 42}}')
dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info",
[
DatasetInfo(),
DatasetInfo(
description="foo",
features=Features({"a": Value("int32")}),
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train"}],
download_size=42,
),
],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
tmp_path = str(tmp_path)
dataset_info.write_to_directory(tmp_path)
reloaded = DatasetInfo.from_directory(tmp_path)
assert dataset_info == reloaded
assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
dataset_info = DatasetInfo(
description="foo",
citation="bar",
homepage="https://foo.bar",
license="CC0",
features=Features({"a": Value("int32")}),
post_processed={},
supervised_keys=tuple(),
task_templates=[],
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train", "num_examples": 42}],
download_checksums={},
download_size=1337,
post_processing_size=442,
dataset_size=1234,
size_in_bytes=1337 + 442 + 1234,
)
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
reloaded = yaml.safe_load(dataset_info_yaml)
assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
dataset_info = DatasetInfo()
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict",
[
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()}),
DatasetInfosDict({"my_config_name": DatasetInfo()}),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo",
features=Features({"a": Value("int32")}),
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train"}],
download_size=42,
)
}
),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42),
"v2": DatasetInfo(dataset_size=1337),
}
),
],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
tmp_path = str(tmp_path)
dataset_infos_dict.write_to_directory(tmp_path)
reloaded = DatasetInfosDict.from_directory(tmp_path)
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
dataset_info.config_name = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(tmp_path, "README.md"))
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"dataset_info",
[
DatasetInfo(),
DatasetInfo(
description="foo",
features=Features({"a": Value("int32")}),
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train"}],
download_size=42,
),
],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
tmp_path = str(tmp_path)
dataset_info.write_to_directory(tmp_path)
reloaded = DatasetInfo.from_directory(tmp_path)
assert dataset_info == reloaded
assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
dataset_info = DatasetInfo(
description="foo",
citation="bar",
homepage="https://foo.bar",
license="CC0",
features=Features({"a": Value("int32")}),
post_processed={},
supervised_keys=tuple(),
task_templates=[],
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train", "num_examples": 42}],
download_checksums={},
download_size=1337,
post_processing_size=442,
dataset_size=1234,
size_in_bytes=1337 + 442 + 1234,
)
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
reloaded = yaml.safe_load(dataset_info_yaml)
assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
dataset_info = DatasetInfo()
dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict",
[
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()}),
DatasetInfosDict({"my_config_name": DatasetInfo()}),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo",
features=Features({"a": Value("int32")}),
builder_name="builder",
config_name="config",
version="1.0.0",
splits=[{"name": "train"}],
download_size=42,
)
}
),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42),
"v2": DatasetInfo(dataset_size=1337),
}
),
],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
tmp_path = str(tmp_path)
dataset_infos_dict.write_to_directory(tmp_path)
reloaded = DatasetInfosDict.from_directory(tmp_path)
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
dataset_info.config_name = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(tmp_path, "README.md"))
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Optional, Iterable, Any
from jina import Executor, DocumentArray, requests
from jina.excepts import BadDocType
import librosa as lr
import numpy as np
import torch
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ['r'],
*args,
**kwargs
):
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path)
self.aclp.eval()
self.aclp.audio.eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
# traverse thought all documents which have to be processed
flat_docs = docs.traverse_flat(traversal_paths)
# filter out documents without audio wav
filtered_docs = DocumentArray(
[doc for doc in flat_docs if doc.blob is not None]
)
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
return
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, Iterable, Any
from jina import Executor, DocumentArray, requests
import torch
from .audio_clip.model import AudioCLIP
from .audio_clip.utils.transforms import ToTensor1D
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
:param model_path: path of the pre-trained AudioCLIP model
:param default_traversal_paths: default traversal path
"""
def __init__(self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ['r'],
*args, **kwargs):
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.aclp = AudioCLIP(pretrained=model_path)
self.aclp.eval()
self.aclp.audio.eval()
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs) -> Any:
if docs:
cleaned_document_array = self._get_input_data(docs, parameters)
self._create_embeddings(cleaned_document_array)
def _get_input_data(self, docs: DocumentArray, parameters: dict):
"""Create a filtered set of Documents to iterate over."""
traversal_paths = parameters.get('traversal_paths', self.default_traversal_paths)
# traverse thought all documents which have to be processed
flat_docs = docs.traverse_flat(traversal_paths)
# filter out documents without images
filtered_docs = DocumentArray([doc for doc in flat_docs if doc.blob is not None])
return filtered_docs
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.aclp.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
<<<<<<< HEAD
'XMLDataset',
'CocoDataset',
'DeepFashionDataset',
'VOCDataset',
'CityscapesDataset',
'LVISDataset',
'LVISV05Dataset',
'LVISV1Dataset',
'WIDERFaceDataset',
'get_loading_pipeline',
'CocoPanopticDataset',
'MultiImageMixDataset',
'OpenImagesDataset',
'OpenImagesChallengeDataset',
'AspectRatioBatchSampler',
'ClassAwareSampler',
'MultiSourceSampler',
'GroupMultiSourceSampler',
'BaseDetDataset',
'CrowdHumanDataset',
'Objects365V1Dataset',
'Objects365V2Dataset',
'DSDLDetDataset',
'BaseVideoDataset',
'MOTChallengeDataset',
'TrackImgSampler',
'ReIDDataset'
=======
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'BaseVideoDataset',
'MOTChallengeDataset', 'TrackImgSampler', 'ReIDDataset',
'YouTubeVISDataset'
>>>>>>> [Feature] support mask2former for vis (#10245)
]
|
import types
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
'AudioNdArray',
'VideoNdArray',
]
def __getattr__(name: str):
if 'Torch' in name:
import_library('torch', raise_error=True)
elif 'TensorFlow' in name:
import_library('tensorflow', raise_error=True)
lib: types.ModuleType
if name == 'TorchTensor':
import docarray.typing.tensor.torch_tensor as lib
elif name == 'TensorFlowTensor':
import docarray.typing.tensor.tensorflow_tensor as lib
elif name in ['TorchEmbedding', 'TensorFlowEmbedding']:
import docarray.typing.tensor.embedding as lib
elif name in ['ImageTorchTensor', 'ImageTensorFlowTensor']:
import docarray.typing.tensor.image as lib
elif name in ['AudioTorchTensor', 'AudioTensorFlowTensor']:
import docarray.typing.tensor.audio as lib
elif name in ['VideoTorchTensor', 'VideoTensorFlowTensor']:
import docarray.typing.tensor.video as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
'ImageNdArray',
'ImageTensor',
'AudioNdArray',
'VideoNdArray',
]
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
__all__.extend(
[
'TorchEmbedding',
'TorchTensor',
'ImageTorchTensor',
'AudioTorchTensor',
'VideoTorchTensor',
]
)
torch_available = is_torch_available()
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
__all__.extend(
[
'TensorFlowEmbedding',
'TensorFlowTensor',
'ImageTensorFlowTensor',
'AudioTensorFlowTensor',
'VideoTensorFlowTensor',
]
)
|
import numpy as np
import pytest
from docarray import Document, DocumentArray
@pytest.mark.parametrize('nrof_docs', [10, 100, 10_000, 10_100, 20_000, 20_100])
@pytest.mark.parametrize('columns', [[('price', 'int')], {'price': 'int'}])
def test_success_get_bulk_data(start_storage, nrof_docs, columns):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': columns,
'distance': 'l2_norm',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
]
)
assert len(elastic_doc[:, 'id']) == nrof_docs
@pytest.mark.parametrize('columns', [[('price', 'int')], {'price': 'int'}])
def test_error_get_bulk_data_id_not_exist(start_storage, columns):
nrof_docs = 10
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': columns,
'distance': 'l2_norm',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
]
)
with pytest.raises(KeyError) as e:
elastic_doc[['r1', 'r11', 'r21'], 'id']
assert e.value.args[0] == ['r11', 'r21']
assert len(e.value.args[1]) == 1
|
from docarray import Document, DocumentArray
import numpy as np
import pytest
@pytest.mark.parametrize('nrof_docs', [10, 100, 10_000, 10_100, 20_000, 20_100])
def test_success_get_bulk_data(start_storage, nrof_docs):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_get_bulk_data',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
]
)
assert len(elastic_doc[:, 'id']) == nrof_docs
def test_error_get_bulk_data_id_not_exist(start_storage):
nrof_docs = 10
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_error_get_bulk_data_id_not_exist',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id=f'r{i}', embedding=np.ones((3,)) * i)
for i in range(nrof_docs)
]
)
with pytest.raises(KeyError) as e:
elastic_doc[['r1', 'r11', 'r21'], 'id']
assert e.value.args[0] == ['r11', 'r21']
assert len(e.value.args[1]) == 1
|
from typing import List
from llama_index.core.base.embeddings.base import BaseEmbedding
from typing import Optional
try:
import chonkie
from chonkie import AutoEmbeddings
except ImportError:
raise ImportError(
"Could not import Autembeddings from chonkie. "
"Please install it with `pip install chonkie[all]`."
)
class ChonkieAutoEmbedding(BaseEmbedding):
"""
Autoembeddings from chonkie.
Args:
model_name (str): The name of the model to use.
"""
model_name: str
embedder: Optional[chonkie.BaseEmbeddings] = None
def __init__(
self,
model_name: str
) -> None:
super().__init__(model_name=model_name)
self.embedder = AutoEmbeddings.get_embeddings(self.model_name)
@classmethod
def class_name(cls) -> str:
return "ChonkieAutoEmbedding"
def _get_embedding(self, text: str) -> List[float]:
embed = self.embedder.embed(text)
return embed.tolist()
async def _aget_embedding(self, text: str) -> List[float]:
return self._get_embedding(text)
def _get_embeddings(self, texts: List[str]) -> List[List[float]]:
embeds = self.embedder.embed_batch(texts)
return [e.tolist() for e in embeds]
async def _aget_embeddings(
self,
texts: List[str],
) -> List[List[float]]:
return self._get_embeddings(texts)
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return await self._aget_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_embedding(text)
|
from typing import List
from llama_index.core.base.embeddings.base import BaseEmbedding
from typing import Optional
try:
import chonkie
from chonkie import AutoEmbeddings
except ImportError:
raise ImportError(
"Could not import Autembeddings from chonkie. "
"Please install it with `pip install chonkie[all]`."
)
class ChonkieAutoEmbedding(BaseEmbedding):
"""Autoembeddings from chonkie.
Args:
model_name (str): The name of the model to use.
"""
model_name: str
embedder: Optional[chonkie.BaseEmbeddings] = None
def __init__(
self,
model_name: str
) -> None:
super().__init__(model_name=model_name)
self.embedder = AutoEmbeddings.get_embeddings(self.model_name)
@classmethod
def class_name(cls) -> str:
return "ChonkieAutoEmbedding"
def _get_embedding(self, text: str) -> List[float]:
embed = self.embedder.embed(text)
return embed.tolist()
async def _aget_embedding(self, text: str) -> List[float]:
return self._get_embedding(text)
def _get_embeddings(self, texts: List[str]) -> List[List[float]]:
embeds = self.embedder.embed_batch(texts)
return [e.tolist() for e in embeds]
async def _aget_embeddings(
self,
texts: List[str],
) -> List[List[float]]:
return self._get_embeddings(texts)
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return await self._aget_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_embedding(text)
|
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.firestore import FirestoreKVStore
class FirestoreIndexStore(KVIndexStore):
"""
Firestore Index store.
Args:
firestore_kvstore (FirestoreKVStore): Firestore key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
firestore_kvstore: FirestoreKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a FirestoreIndexStore."""
super().__init__(
firestore_kvstore, namespace=namespace, collection_suffix=collection_suffix
)
@classmethod
def from_database(
cls,
project: str,
database: str,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> "FirestoreIndexStore":
"""
Load a FirestoreIndexStore from a Firestore database.
Args:
project (str): The project which the client acts on behalf of.
database (str): The database name that the client targets.
namespace (str): namespace for the docstore.
collection_suffix (str): suffix for the collection name
"""
firestore_kvstore = FirestoreKVStore(project=project, database=database)
return cls(firestore_kvstore, namespace, collection_suffix)
|
from typing import Optional
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.firestore import FirestoreKVStore
class FirestoreIndexStore(KVIndexStore):
"""Firestore Index store.
Args:
firestore_kvstore (FirestoreKVStore): Firestore key-value store
namespace (str): namespace for the index store
"""
def __init__(
self,
firestore_kvstore: FirestoreKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a FirestoreIndexStore."""
super().__init__(
firestore_kvstore, namespace=namespace, collection_suffix=collection_suffix
)
@classmethod
def from_database(
cls,
project: str,
database: str,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> "FirestoreIndexStore":
"""
Load a FirestoreIndexStore from a Firestore database.
Args:
project (str): The project which the client acts on behalf of.
database (str): The database name that the client targets.
namespace (str): namespace for the docstore.
collection_suffix (str): suffix for the collection name
"""
firestore_kvstore = FirestoreKVStore(project=project, database=database)
return cls(firestore_kvstore, namespace, collection_suffix)
|
_base_ = './vfnet_r50-mdconv-c3-c5_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info(f"Accuracy: {accuracy:.4f} ({correct}/{total})\n")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics, epoch, steps)
return metrics
|
from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}:"
else:
out_txt = f" in epoch {epoch} after {steps} steps:"
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info(f"Accuracy: {accuracy:.4f} ({correct}/{total})\n")
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
|
# Copyright (c) OpenMMLab. All rights reserved.
import sys
from unittest import TestCase
import torch.cuda
import mmengine
from mmengine.utils.dl_utils import collect_env
from mmengine.utils.dl_utils.parrots_wrapper import _get_cuda_home
class TestCollectEnv(TestCase):
def test_get_cuda_home(self):
CUDA_HOME = _get_cuda_home()
if torch.version.cuda is not None:
self.assertIsNotNone(CUDA_HOME)
else:
self.assertIsNone(CUDA_HOME)
def test_collect_env(self):
env_info = collect_env()
expected_keys = [
'sys.platform', 'Python', 'CUDA available', 'PyTorch',
'PyTorch compiling details', 'OpenCV', 'MMEngine', 'GCC'
]
for key in expected_keys:
assert key in env_info
if env_info['CUDA available']:
for key in ['CUDA_HOME', 'NVCC']:
assert key in env_info
if sys.platform == 'win32':
assert 'MSVC' in env_info
assert env_info['sys.platform'] == sys.platform
assert env_info['Python'] == sys.version.replace('\n', '')
assert env_info['MMEngine'] == mmengine.__version__
|
# Copyright (c) OpenMMLab. All rights reserved.
import sys
from unittest import TestCase
import torch.cuda
import mmengine
from mmengine.utils.dl_utils import collect_env
from mmengine.utils.dl_utils.parrots_wrapper import _get_cuda_home
class TestCollectEnv(TestCase):
def test_get_cuda_home(self):
CUDA_HOME = _get_cuda_home()
if torch.cuda.is_available():
self.assertIsNotNone(CUDA_HOME)
else:
self.assertIsNone(CUDA_HOME)
def test_collect_env(self):
env_info = collect_env()
expected_keys = [
'sys.platform', 'Python', 'CUDA available', 'PyTorch',
'PyTorch compiling details', 'OpenCV', 'MMEngine', 'GCC'
]
for key in expected_keys:
assert key in env_info
if env_info['CUDA available']:
for key in ['CUDA_HOME', 'NVCC']:
assert key in env_info
if sys.platform == 'win32':
assert 'MSVC' in env_info
assert env_info['sys.platform'] == sys.platform
assert env_info['Python'] == sys.version.replace('\n', '')
assert env_info['MMEngine'] == mmengine.__version__
|
import os
import shutil
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
_MIXTURE_FILE = "mixture_3729-6852-0037_8463-287645-0000.wav"
_CLEAN_FILES = [
"s1_3729-6852-0037_8463-287645-0000.wav",
"s2_3729-6852-0037_8463-287645-0000.wav",
]
@pytest.fixture
def sample_speech(lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = torchaudio.utils.download_asset(f"test-assets/{filename}")
return path
@pytest.fixture
def mixture_source():
path = torchaudio.utils.download_asset(f"test-assets/{_MIXTURE_FILE}")
return path
@pytest.fixture
def clean_sources():
paths = []
for file in _CLEAN_FILES:
path = torchaudio.utils.download_asset(f"test-assets/{file}")
paths.append(path)
return paths
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmp_path, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
subdir = os.path.join(tmp_path, "hub")
torch.hub.set_dir(subdir)
yield
torch.hub.set_dir(org_dir)
shutil.rmtree(subdir, ignore_errors=True)
@pytest.fixture()
def emissions():
path = torchaudio.utils.download_asset("test-assets/emissions-8555-28447-0012.pt")
return torch.load(path)
|
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
_MIXTURE_FILE = "mixture_3729-6852-0037_8463-287645-0000.wav"
_CLEAN_FILES = [
"s1_3729-6852-0037_8463-287645-0000.wav",
"s2_3729-6852-0037_8463-287645-0000.wav",
]
@pytest.fixture
def sample_speech(tmp_path, lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = tmp_path.parent / filename
if not path.exists():
torchaudio.utils.download_asset(f"test-assets/{filename}", path=path)
return path
@pytest.fixture
def mixture_source():
path = torchaudio.utils.download_asset(f"test-assets/{_MIXTURE_FILE}")
return path
@pytest.fixture
def clean_sources():
paths = []
for file in _CLEAN_FILES:
path = torchaudio.utils.download_asset(f"test-assets/{file}")
paths.append(path)
return paths
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmpdir, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
torch.hub.set_dir(tmpdir)
yield
torch.hub.set_dir(org_dir)
@pytest.fixture()
def emissions():
path = torchaudio.utils.download_asset("test-assets/emissions-8555-28447-0012.pt")
return torch.load(path)
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections.abc import Mapping
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
class NumpyFormatter(Formatter[Mapping, np.ndarray, Mapping]):
def __init__(self, features=None, **np_array_kwargs):
super().__init__(features=features)
self.np_array_kwargs = np_array_kwargs
def _consolidate(self, column):
if isinstance(column, list):
if column and all(
isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return np.stack(column)
else:
# don't use np.array(column, dtype=object)
# since it fails in certain cases
# see https://stackoverflow.com/q/51005699
out = np.empty(len(column), dtype=object)
out[:] = column
return out
return column
def _tensorize(self, value):
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value
elif isinstance(value, np.number):
return value
default_dtype = {}
if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": np.int64}
elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": np.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
return np.asarray(value, **self.np_array_kwargs)
return np.array(value, **{**default_dtype, **self.np_array_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> np.ndarray:
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
import sys
from collections.abc import Mapping
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
class NumpyFormatter(Formatter[Mapping, np.ndarray, Mapping]):
def __init__(self, features=None, **np_array_kwargs):
super().__init__(features=features)
self.np_array_kwargs = np_array_kwargs
def _consolidate(self, column):
if isinstance(column, list):
if column and all(
isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return np.stack(column)
else:
# don't use np.array(column, dtype=object)
# since it fails in certain cases
# see https://stackoverflow.com/q/51005699
out = np.empty(len(column), dtype=object)
out[:] = column
return out
return column
def _tensorize(self, value):
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value
elif isinstance(value, np.number):
return value
default_dtype = {}
if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": np.int64}
elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": np.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
return np.asarray(value, **self.np_array_kwargs)
return np.array(value, **{**default_dtype, **self.np_array_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> np.ndarray:
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.15.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.17'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.15.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.17'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone: ConfigDict,
rpn_head: ConfigDict,
roi_head: ConfigDict,
train_cfg: ConfigDict,
test_cfg: ConfigDict,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import ConfigDict
from mmdet.core.utils import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone: ConfigDict,
rpn_head: ConfigDict,
roi_head: ConfigDict,
train_cfg: ConfigDict,
test_cfg: ConfigDict,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
|
from pathlib import Path
from typing import Any, Callable, Optional, Union
from .folder import default_loader, ImageFolder
from .utils import download_and_extract_archive, verify_str_arg
class Country211(ImageFolder):
"""`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.
This dataset was built by filtering the images from the YFCC100m dataset
that have GPS coordinate corresponding to a ISO-3166 country code. The
dataset is balanced by sampling 150 train images, 50 validation images, and
100 test images for each country.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/country211/``. If dataset is already downloaded, it is not downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
_MD5 = "84988d7644798601126c29e9877aab6a"
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Callable[[str], Any] = default_loader,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "valid", "test"))
root = Path(root).expanduser()
self.root = str(root)
self._base_folder = root / "country211"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(
str(self._base_folder / self._split),
transform=transform,
target_transform=target_transform,
loader=loader,
)
self.root = str(root)
def _check_exists(self) -> bool:
return self._base_folder.exists() and self._base_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
from pathlib import Path
from typing import Callable, Optional, Union
from .folder import ImageFolder
from .utils import download_and_extract_archive, verify_str_arg
class Country211(ImageFolder):
"""`The Country211 Data Set <https://github.com/openai/CLIP/blob/main/data/country211.md>`_ from OpenAI.
This dataset was built by filtering the images from the YFCC100m dataset
that have GPS coordinate corresponding to a ISO-3166 country code. The
dataset is balanced by sampling 150 train images, 50 validation images, and
100 test images for each country.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"valid"`` and ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and puts it into
``root/country211/``. If dataset is already downloaded, it is not downloaded again.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
_MD5 = "84988d7644798601126c29e9877aab6a"
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self._split = verify_str_arg(split, "split", ("train", "valid", "test"))
root = Path(root).expanduser()
self.root = str(root)
self._base_folder = root / "country211"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
super().__init__(str(self._base_folder / self._split), transform=transform, target_transform=target_transform)
self.root = str(root)
def _check_exists(self) -> bool:
return self._base_folder.exists() and self._base_folder.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
def test_barkspectrogram(self):
# replication_pad1d_backward_cuda is not deteministic and
# gives very small (~e-16) difference.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
def test_barkspectrogram(self):
# replication_pad1d_backward_cuda is not deteministic and
# gives very small (~e-16) difference.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
|
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
w, h = 224, 224
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_to_bytes(width=w, height=h)
assert isinstance(_bytes, bytes)
img = PIL.Image.frombytes(mode='1', size=(w, h), data=_bytes)
assert isinstance(img, PIL.Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import os
import urllib
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
w, h = 224, 224
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_to_bytes(width=w, height=h)
assert isinstance(_bytes, bytes)
img = PIL.Image.frombytes(mode='1', size=(w, h), data=_bytes)
assert isinstance(img, PIL.Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
from docarray.documents.mesh.mesh_3d import Mesh3D
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
__all__ = ['Mesh3D', 'VerticesAndFaces']
|
from docarray.documents.mesh.mesh_3d import Mesh3D
__all__ = ['Mesh3D']
|
import json
from typing import Any, Type, TypeGuard, TypeVar, overload
import jsonschema
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from .type import type_match
def to_dict(data) -> dict:
if isinstance(data, BaseModel):
data = data.model_dump()
return jsonable_encoder(data)
def dumps(data: Any, *args: Any, **kwargs: Any) -> str:
"""
Serialize data to JSON string with automatic conversion of Pydantic models and complex types.
This function converts the input data to a JSON-serializable format using FastAPI's
jsonable_encoder before dumping to JSON. It handles Pydantic models, complex types,
and ensures proper serialization.
Parameters
----------
data : Any
The data to serialize. Can be any type including Pydantic models, dicts, lists, etc.
*args : Any
Additional positional arguments passed to json.dumps()
**kwargs : Any
Additional keyword arguments passed to json.dumps() (e.g., indent, separators)
Returns
-------
str
JSON string representation of the data
Examples
--------
>>> dumps({"name": "Alice", "age": 30})
'{"name": "Alice", "age": 30}'
>>> dumps(pydantic_model_instance, indent=2)
'{\n "field1": "value1",\n "field2": "value2"\n}'
"""
return json.dumps(to_dict(data), *args, **kwargs)
T = TypeVar("T")
@overload
def loads(data: str | bytes, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str | bytes, *args, **kwargs) -> Any: ...
def loads(
data: str | bytes, *args, target_type: Type[T] | None = None, **kwargs
) -> Any:
if isinstance(data, bytes):
data = data.decode("utf-8")
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
def validate_with_jsonschema(
schema: dict[str, Any], data: dict[str, Any]
) -> str | None:
"""
Validate the data against the schema.
Returns the validation error message if the data does not match the schema.
"""
try:
jsonschema.validate(data, schema)
return None
except jsonschema.ValidationError as e:
return str(e)
def is_list_of_basemodels(value: object) -> TypeGuard[list[BaseModel]]:
return isinstance(value, list) and all(
isinstance(item, BaseModel) for item in value
)
def convert_pydantic_to_json(output_data: Any) -> Any:
if isinstance(output_data, BaseModel):
return output_data.model_dump()
if is_list_of_basemodels(output_data):
return [item.model_dump() for item in output_data]
return output_data
|
import json
from typing import Any, Type, TypeGuard, TypeVar, overload
import jsonschema
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from .type import type_match
def to_dict(data) -> dict:
if isinstance(data, BaseModel):
data = data.model_dump()
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(to_dict(data))
T = TypeVar("T")
@overload
def loads(data: str | bytes, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str | bytes, *args, **kwargs) -> Any: ...
def loads(
data: str | bytes, *args, target_type: Type[T] | None = None, **kwargs
) -> Any:
if isinstance(data, bytes):
data = data.decode("utf-8")
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
def validate_with_jsonschema(
schema: dict[str, Any], data: dict[str, Any]
) -> str | None:
"""
Validate the data against the schema.
Returns the validation error message if the data does not match the schema.
"""
try:
jsonschema.validate(data, schema)
return None
except jsonschema.ValidationError as e:
return str(e)
def is_list_of_basemodels(value: object) -> TypeGuard[list[BaseModel]]:
return isinstance(value, list) and all(
isinstance(item, BaseModel) for item in value
)
def convert_pydantic_to_json(output_data: Any) -> Any:
if isinstance(output_data, BaseModel):
return output_data.model_dump()
if is_list_of_basemodels(output_data):
return [item.model_dump() for item in output_data]
return output_data
|
from typing import Any, Optional, Union, cast
from langchain_core.messages import AIMessage, ToolCall
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from pydantic import BaseModel, ConfigDict
class ToolsOutputParser(BaseGenerationOutputParser):
"""Output parser for tool calls."""
first_tool_only: bool = False
"""Whether to return only the first tool call."""
args_only: bool = False
"""Whether to return only the arguments of the tool calls."""
pydantic_schemas: Optional[list[type[BaseModel]]] = None
"""Pydantic schemas to parse tool calls into."""
model_config = ConfigDict(
extra="forbid",
)
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
if not result or not isinstance(result[0], ChatGeneration):
return None if self.first_tool_only else []
message = cast(AIMessage, result[0].message)
tool_calls: list = [
dict(tc) for tc in _extract_tool_calls_from_message(message)
]
if isinstance(message.content, list):
# Map tool call id to index
id_to_index = {
block["id"]: i
for i, block in enumerate(message.content)
if isinstance(block, dict) and block["type"] == "tool_use"
}
tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
if self.pydantic_schemas:
tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
elif self.args_only:
tool_calls = [tc["args"] for tc in tool_calls]
else:
pass
if self.first_tool_only:
return tool_calls[0] if tool_calls else None
else:
return [tool_call for tool_call in tool_calls]
def _pydantic_parse(self, tool_call: dict) -> BaseModel:
cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
tool_call["name"]
]
return cls_(**tool_call["args"])
def _extract_tool_calls_from_message(message: AIMessage) -> list[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if message.tool_calls:
return message.tool_calls
return extract_tool_calls(message.content)
def extract_tool_calls(content: Union[str, list[Union[str, dict]]]) -> list[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if isinstance(content, list):
tool_calls = []
for block in content:
if isinstance(block, str):
continue
if block["type"] != "tool_use":
continue
tool_calls.append(
tool_call(name=block["name"], args=block["input"], id=block["id"])
)
return tool_calls
else:
return []
|
from typing import Any, Optional, Union, cast
from langchain_core.messages import AIMessage, ToolCall
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from pydantic import BaseModel, ConfigDict
class ToolsOutputParser(BaseGenerationOutputParser):
"""Output parser for tool calls."""
first_tool_only: bool = False
"""Whether to return only the first tool call."""
args_only: bool = False
"""Whether to return only the arguments of the tool calls."""
pydantic_schemas: Optional[list[type[BaseModel]]] = None
"""Pydantic schemas to parse tool calls into."""
model_config = ConfigDict(
extra="forbid",
)
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
if not result or not isinstance(result[0], ChatGeneration):
return None if self.first_tool_only else []
message = cast(AIMessage, result[0].message)
tool_calls: list = [
dict(tc) for tc in _extract_tool_calls_from_message(message)
]
if isinstance(message.content, list):
# Map tool call id to index
id_to_index = {
block["id"]: i
for i, block in enumerate(message.content)
if isinstance(block, dict) and block["type"] == "tool_use"
}
tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
if self.pydantic_schemas:
tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
elif self.args_only:
tool_calls = [tc["args"] for tc in tool_calls]
else:
pass
if self.first_tool_only:
return tool_calls[0] if tool_calls else None
else:
return [tool_call for tool_call in tool_calls]
def _pydantic_parse(self, tool_call: dict) -> BaseModel:
cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
tool_call["name"]
]
return cls_(**tool_call["args"])
def _extract_tool_calls_from_message(message: AIMessage) -> list[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if message.tool_calls:
return message.tool_calls
return extract_tool_calls(message.content)
def extract_tool_calls(content: Union[str, list[Union[str, dict]]]) -> list[ToolCall]:
"""Extract tool calls from a list of content blocks."""
if isinstance(content, list):
tool_calls = []
for block in content:
if isinstance(block, str):
continue
if block["type"] != "tool_use":
continue
tool_calls.append(
tool_call(name=block["name"], args=block["input"], id=block["id"])
)
return tool_calls
else:
return []
|
# Copyright 2021 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import jax
class JaxFormatter(Formatter[Mapping, "jax.Array", Mapping]):
def __init__(self, features=None, **jnp_array_kwargs):
super().__init__(features=features)
self.jnp_array_kwargs = jnp_array_kwargs
import jax # noqa import jax at initialization
def _consolidate(self, column):
import jax
import jax.numpy as jnp
if isinstance(column, list) and column:
if all(
isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return jnp.stack(column, axis=0)
return column
def _tensorize(self, value):
import jax
import jax.numpy as jnp
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "jax.Array":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
# Copyright 2021 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
if TYPE_CHECKING:
import jax.numpy as jnp
class JaxFormatter(Formatter[Mapping, "jnp.ndarray", Mapping]):
def __init__(self, features=None, **jnp_array_kwargs):
super().__init__(features=features)
self.jnp_array_kwargs = jnp_array_kwargs
import jax.numpy as jnp # noqa import jax at initialization
def _consolidate(self, column):
import jax.numpy as jnp
if isinstance(column, list) and column:
if all(
isinstance(x, jnp.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column
):
return jnp.stack(column)
return column
def _tensorize(self, value):
import jax
import jax.numpy as jnp
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value.tolist()
default_dtype = {}
if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
default_dtype = {"dtype": jnp.int64}
else:
default_dtype = {"dtype": jnp.int32}
elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
value = np.asarray(value)
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "jnp.ndarray":
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import Optional
import torch
TORCH_VERSION = torch.__version__
def is_rocm_pytorch() -> bool:
"""Check whether the PyTorch is compiled on ROCm."""
is_rocm = False
if TORCH_VERSION != 'parrots':
try:
from torch.utils.cpp_extension import ROCM_HOME
is_rocm = True if ((torch.version.hip is not None) and
(ROCM_HOME is not None)) else False
except ImportError:
pass
return is_rocm
def _get_cuda_home() -> Optional[str]:
"""Obtain the path of CUDA home."""
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import CUDA_HOME
else:
if is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
CUDA_HOME = ROCM_HOME
else:
from torch.utils.cpp_extension import CUDA_HOME
return CUDA_HOME
def get_build_config():
"""Obtain the build information of PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.config import get_build_info
return get_build_info()
else:
return torch.__config__.show()
def _get_conv() -> tuple:
"""A wrapper to obtain base classes of Conv layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
else:
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
return _ConvNd, _ConvTransposeMixin
def _get_dataloader() -> tuple:
"""A wrapper to obtain DataLoader class from PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from torch.utils.data import DataLoader, PoolDataLoader
else:
from torch.utils.data import DataLoader
PoolDataLoader = DataLoader
return DataLoader, PoolDataLoader
def _get_extension():
"""A wrapper to obtain extension class from PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import BuildExtension, Extension
CppExtension = partial(Extension, cuda=False)
CUDAExtension = partial(Extension, cuda=True)
else:
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
return BuildExtension, CppExtension, CUDAExtension
def _get_pool() -> tuple:
"""A wrapper to obtain base classes of pooling layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
else:
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
def _get_norm() -> tuple:
"""A wrapper to obtain base classes of normalization layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
else:
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm
return _BatchNorm, _InstanceNorm, SyncBatchNorm_
_ConvNd, _ConvTransposeMixin = _get_conv()
DataLoader, PoolDataLoader = _get_dataloader()
BuildExtension, CppExtension, CUDAExtension = _get_extension()
_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm()
_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool()
class SyncBatchNorm(SyncBatchNorm_): # type: ignore
def _check_input_dim(self, input):
if TORCH_VERSION == 'parrots':
if input.dim() < 2:
raise ValueError(
f'expected at least 2D input (got {input.dim()}D input)')
else:
super()._check_input_dim(input)
|
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
from typing import Optional
import torch
TORCH_VERSION = torch.__version__
def is_rocm_pytorch() -> bool:
"""Check whether the PyTorch is compiled on ROCm."""
is_rocm = False
if TORCH_VERSION != 'parrots':
try:
from torch.utils.cpp_extension import ROCM_HOME
is_rocm = True if ((torch.version.hip is not None) and
(ROCM_HOME is not None)) else False
except ImportError:
pass
return is_rocm
def _get_cuda_home() -> Optional[str]:
"""Obtain the path of CUDA home."""
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import CUDA_HOME
else:
if is_rocm_pytorch():
from torch.utils.cpp_extension import ROCM_HOME
CUDA_HOME = ROCM_HOME
else:
from torch.utils.cpp_extension import CUDA_HOME
return CUDA_HOME
def get_build_config():
"""Obtain the build information of PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.config import get_build_info
return get_build_info()
else:
return torch.__config__.show()
def _get_conv() -> tuple:
"""A wrapper to obtain base classes of Conv layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
else:
from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
return _ConvNd, _ConvTransposeMixin
def _get_dataloader() -> tuple:
"""A wrapper to obtain DataLoader class from PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from torch.utils.data import DataLoader, PoolDataLoader
else:
from torch.utils.data import DataLoader
PoolDataLoader = DataLoader
return DataLoader, PoolDataLoader
def _get_extension():
"""A wrapper to obtain extension class from PyTorch or Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.utils.build_extension import BuildExtension, Extension
CppExtension = partial(Extension, cuda=False)
CUDAExtension = partial(Extension, cuda=True)
else:
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
return BuildExtension, CppExtension, CUDAExtension
def _get_pool() -> tuple:
"""A wrapper to obtain base classes of pooling layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
else:
from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
_AdaptiveMaxPoolNd, _AvgPoolNd,
_MaxPoolNd)
return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
def _get_norm() -> tuple:
"""A wrapper to obtain base classes of normalization layers from PyTorch or
Parrots."""
if TORCH_VERSION == 'parrots':
from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
else:
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
SyncBatchNorm_ = torch.nn.SyncBatchNorm
return _BatchNorm, _InstanceNorm, SyncBatchNorm_
_ConvNd, _ConvTransposeMixin = _get_conv()
DataLoader, PoolDataLoader = _get_dataloader()
BuildExtension, CppExtension, CUDAExtension = _get_extension()
_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm()
_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool()
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=0.20.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=0.20.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.TIMMBackbone',
model_name='efficientnet_b1',
features_only=True,
pretrained=True,
out_indices=(1, 2, 3, 4)),
neck=dict(in_channels=[24, 40, 112, 320]))
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
default_scope = 'mmdet'
default_hooks = dict(
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='DetVisualizationHook'))
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO'
load_from = None
resume = False
|
default_scope = 'mmdet'
default_hooks = dict(
optimizer=dict(type='OptimizerHook', grad_clip=None),
timer=dict(type='IterTimerHook'),
logger=dict(type='LoggerHook', interval=50),
param_scheduler=dict(type='ParamSchedulerHook'),
checkpoint=dict(type='CheckpointHook', interval=1),
sampler_seed=dict(type='DistSamplerSeedHook'),
visualization=dict(type='DetVisualizationHook'))
env_cfg = dict(
cudnn_benchmark=False,
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='DetLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)
log_level = 'INFO'
load_from = None
resume = False
|
"""PlaygroundsSubgraphConnectorToolSpec."""
from typing import Optional, Union
import requests
from llama_index.tools.graphql.base import GraphQLToolSpec
class PlaygroundsSubgraphConnectorToolSpec(GraphQLToolSpec):
"""
Connects to subgraphs on The Graph's decentralized network via the Playgrounds API.
Attributes:
spec_functions (list): List of functions that specify the tool's capabilities.
url (str): The endpoint URL for the GraphQL requests.
headers (dict): Headers used for the GraphQL requests.
"""
spec_functions = ["graphql_request"]
def __init__(self, identifier: str, api_key: str, use_deployment_id: bool = False):
"""
Initialize the connector.
Args:
identifier (str): Subgraph identifier or Deployment ID.
api_key (str): API key for the Playgrounds API.
use_deployment_id (bool): Flag to indicate if the identifier is a deployment ID. Default is False.
"""
endpoint = "deployments" if use_deployment_id else "subgraphs"
self.url = (
f"https://api.playgrounds.network/v1/proxy/{endpoint}/id/{identifier}"
)
self.headers = {
"Content-Type": "application/json",
"Playgrounds-Api-Key": api_key,
}
def graphql_request(
self,
query: str,
variables: Optional[dict] = None,
operation_name: Optional[str] = None,
) -> Union[dict, str]:
"""
Make a GraphQL query.
Args:
query (str): The GraphQL query string to execute.
variables (dict, optional): Variables for the GraphQL query. Default is None.
operation_name (str, optional): Name of the operation, if multiple operations are present in the query. Default is None.
Returns:
dict: The response from the GraphQL server if successful.
str: Error message if the request fails.
"""
payload = {"query": query.strip()}
if variables:
payload["variables"] = variables
if operation_name:
payload["operationName"] = operation_name
try:
response = requests.post(self.url, headers=self.headers, json=payload)
# Check if the request was successful
response.raise_for_status()
# Return the JSON response
return response.json()
except requests.RequestException as e:
# Handle request errors
return str(e)
except ValueError as e:
# Handle JSON decoding errors
return f"Error decoding JSON: {e}"
|
"""PlaygroundsSubgraphConnectorToolSpec."""
from typing import Optional, Union
import requests
from llama_index.tools.graphql.base import GraphQLToolSpec
class PlaygroundsSubgraphConnectorToolSpec(GraphQLToolSpec):
"""
Connects to subgraphs on The Graph's decentralized network via the Playgrounds API.
Attributes:
spec_functions (list): List of functions that specify the tool's capabilities.
url (str): The endpoint URL for the GraphQL requests.
headers (dict): Headers used for the GraphQL requests.
"""
spec_functions = ["graphql_request"]
def __init__(self, identifier: str, api_key: str, use_deployment_id: bool = False):
"""
Initialize the connector.
Args:
identifier (str): Subgraph identifier or Deployment ID.
api_key (str): API key for the Playgrounds API.
use_deployment_id (bool): Flag to indicate if the identifier is a deployment ID. Default is False.
"""
endpoint = "deployments" if use_deployment_id else "subgraphs"
self.url = (
f"https://api.playgrounds.network/v1/proxy/{endpoint}/id/{identifier}"
)
self.headers = {
"Content-Type": "application/json",
"Playgrounds-Api-Key": api_key,
}
def graphql_request(
self,
query: str,
variables: Optional[dict] = None,
operation_name: Optional[str] = None,
) -> Union[dict, str]:
"""
Make a GraphQL query.
Args:
query (str): The GraphQL query string to execute.
variables (dict, optional): Variables for the GraphQL query. Default is None.
operation_name (str, optional): Name of the operation, if multiple operations are present in the query. Default is None.
Returns:
dict: The response from the GraphQL server if successful.
str: Error message if the request fails.
"""
payload = {"query": query.strip()}
if variables:
payload["variables"] = variables
if operation_name:
payload["operationName"] = operation_name
try:
response = requests.post(self.url, headers=self.headers, json=payload)
# Check if the request was successful
response.raise_for_status()
# Return the JSON response
return response.json()
except requests.RequestException as e:
# Handle request errors
return str(e)
except ValueError as e:
# Handle JSON decoding errors
return f"Error decoding JSON: {e}"
|
_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './ga_retinanet_r101_caffe_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18_gflv1_r101_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
# Test forward train
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestKDSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(['ld/ld_r18_gflv1_r101_fpn_coco_1x.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test forward train
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('ld/ld_r18_gflv1_r101_fpn_coco_1x.py', ('cpu',
'cuda'))])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import elu
class ELUTest(testing.TestCase):
def test_config(self):
elu_layer = elu.ELU()
self.run_class_serialization_test(elu_layer)
@pytest.mark.requires_trainable_backend
def test_elu(self):
self.run_layer_test(
elu.ELU,
init_kwargs={},
input_shape=(2, 3, 4),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_correctness(self):
def np_elu(x, alpha=1.0):
return (x > 0) * x + (x <= 0) * alpha * (np.exp(x) - 1)
x = np.random.random((2, 2, 5))
elu_layer = elu.ELU()
self.assertAllClose(elu_layer(x), np_elu(x))
elu_layer = elu.ELU(alpha=0.7)
self.assertAllClose(elu_layer(x), np_elu(x, alpha=0.7))
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import elu
class ELUTest(testing.TestCase):
def test_config(self):
elu_layer = elu.ELU()
self.run_class_serialization_test(elu_layer)
@pytest.mark.requires_trainable_backend
def test_elu(self):
self.run_layer_test(
elu.ELU,
init_kwargs={},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_correctness(self):
def np_elu(x, alpha=1.0):
return (x > 0) * x + (x <= 0) * alpha * (np.exp(x) - 1)
x = np.random.random((2, 2, 5))
elu_layer = elu.ELU()
self.assertAllClose(elu_layer(x), np_elu(x))
elu_layer = elu.ELU(alpha=0.7)
self.assertAllClose(elu_layer(x), np_elu(x, alpha=0.7))
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, BaseNode):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(
cls: Type[T], value: Union[T, Any], field: 'ModelField', config: 'BaseConfig'
) -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
else:
try:
arr: np.ndarray = np.ndarray(value)
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
def _to_node_protobuf(self: T) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self.flush_ndarray(nd_proto, value=self)
NodeProto(tensor=nd_proto)
return NodeProto(tensor=nd_proto)
@classmethod
def read_ndarray(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def flush_ndarray(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
|
from typing import Union, TypeVar, Any, TYPE_CHECKING, Type, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, BaseNode):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(
cls: Type[T], value: Union[T, Any], field: 'ModelField', config: 'BaseConfig'
) -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
else:
try:
arr: np.ndarray = np.ndarray(value)
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
def _to_nested_item_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should
be called when the Document is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self.flush_ndarray(nd_proto, value=self)
NodeProto(tensor=nd_proto)
return NodeProto(tensor=nd_proto)
@classmethod
def read_ndarray(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def flush_ndarray(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
Bytes that store an audio and that can be load into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[np.ndarray, int]:
"""
Load the Audio from the bytes into a numpy.ndarray Audio tensor
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioUrl, NdArray, AudioBytes
import numpy as np
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[NdArray]
bytes: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes.load()
# Note this is equivalent to do
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, np.ndarray)
```
---
:return: np.ndarray representing the Audio as RGB values
"""
from pydub import AudioSegment # type: ignore
segment = AudioSegment.from_file(io.BytesIO(self))
# Convert to float32 using NumPy
samples = np.array(segment.get_array_of_samples())
# Normalise float32 array so that values are between -1.0 and +1.0
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return samples_norm, segment.frame_rate
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
Bytes that store an audio and that can be load into an Audio tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[np.ndarray, int]:
"""
Load the Audio from the bytes into a numpy.ndarray Audio tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
import numpy as np
from docarray.typing import AudioUrl
class MyAudio(Document):
url: AudioUrl
tensor: Optional[NdArray]
bytes: Optional[bytes]
doc = MyAudio(url="toydata/hello.wav")
doc.bytes = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes.load()
# Note this is equivalent to do
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
:return: np.ndarray representing the Audio as RGB values
"""
from pydub import AudioSegment # type: ignore
segment = AudioSegment.from_file(io.BytesIO(self))
# Convert to float32 using NumPy
samples = np.array(segment.get_array_of_samples())
# Normalise float32 array so that values are between -1.0 and +1.0
samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return samples_norm, segment.frame_rate
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseEmbeddingSimilarityEvaluator(EmbeddingSimilarityEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
scores: list[float],
batch_size: int = 16,
main_similarity: str | SimilarityFunction | None = None,
similarity_fn_names: list[Literal["cosine", "euclidean", "manhattan", "dot"]] | None = None,
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
sentences1=sentences1,
sentences2=sentences2,
scores=scores,
batch_size=batch_size,
main_similarity=main_similarity,
similarity_fn_names=similarity_fn_names,
name=name,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
precision=None,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self.name, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseEmbeddingSimilarityEvaluator(EmbeddingSimilarityEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
scores: list[float],
batch_size: int = 16,
main_similarity: str | SimilarityFunction | None = None,
similarity_fn_names: list[Literal["cosine", "euclidean", "manhattan", "dot"]] | None = None,
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] | None = None,
truncate_dim: int | None = None,
):
return super().__init__(
sentences1=sentences1,
sentences2=sentences2,
scores=scores,
batch_size=batch_size,
main_similarity=main_similarity,
similarity_fn_names=similarity_fn_names,
name=name,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
precision=precision,
truncate_dim=truncate_dim,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
precision=self.precision,
normalize_embeddings=bool(self.precision),
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self.name, metrics, epoch=epoch, step=step)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.