input | output
---|---
from collections.abc import Sequence
from inspect import signature
from typing import Optional, Union
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of Transformers."""
transformers: list[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents, query, callbacks=callbacks
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents, query, callbacks=callbacks
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
|
from inspect import signature
from typing import List, Optional, Sequence, Union
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of Transformers."""
transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents, query, callbacks=callbacks
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents, query, callbacks=callbacks
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
|
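A minimal usage sketch of the pipeline class above, assuming a hypothetical `UpperCaseTransformer` as the only stage; a real pipeline would plug in LangChain's own splitters, filters and compressors in exactly the same way.

from collections.abc import Sequence
from typing import Any

from langchain_core.documents import BaseDocumentTransformer, Document


class UpperCaseTransformer(BaseDocumentTransformer):
    """Hypothetical transformer, used only to illustrate the pipeline."""

    def transform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        # Upper-case every document's content, leaving metadata untouched.
        return [
            Document(page_content=d.page_content.upper(), metadata=d.metadata)
            for d in documents
        ]

    async def atransform_documents(
        self, documents: Sequence[Document], **kwargs: Any
    ) -> Sequence[Document]:
        return self.transform_documents(documents, **kwargs)


pipeline = DocumentCompressorPipeline(transformers=[UpperCaseTransformer()])
docs = pipeline.compress_documents([Document(page_content="hello world")], query="greeting")
print(docs[0].page_content)  # HELLO WORLD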
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_match_cost
from .match_cost import (BBoxL1Cost, ClassificationCost, DiceCost,
FocalLossCost, IoUCost)
__all__ = [
'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
'FocalLossCost', 'DiceCost'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_match_cost
from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost
__all__ = [
'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
'FocalLossCost'
]
|
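A hedged sketch of how these cost classes are typically built from config dicts through the registry-backed factory; the weights shown are illustrative values, not prescribed defaults.

# Build Hungarian-matching cost modules from plain config dicts.
cls_cost = build_match_cost(dict(type='ClassificationCost', weight=1.0))
bbox_cost = build_match_cost(dict(type='BBoxL1Cost', weight=5.0))
iou_cost = build_match_cost(dict(type='IoUCost', iou_mode='giou', weight=2.0))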
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
help='the UUID for identifying the workspace. When not given a random id will be assigned.'
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_deployment_parser(parser, title='Base Deployment'):
"""Mixing in arguments required by a deployment into the given parser.
The Deployment doesn't have scalable features like shards, replicas and polling
:param parser: the parser instance to which we add arguments
:param title: the title of the create args group
:return: returns the created arg group
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title=title)
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
gp.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
return gp
def mixin_scalable_deployment_parser(parser):
"""Mixing in arguments required by a scalable deployment into the given parser.
The deployment is scalable and can have shards, replicas and polling
:param parser: the parser instance to which we add arguments
"""
gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
help='If set, only native Executors is allowed, and the Executor is always run inside WorkerRuntime.',
)
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
help='the UUID for identifying the workspace. When not given a random id will be assigned.'
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_ppr_parser(parser):
"""Mixing in arguments required by pod/deployment/runtime module into the given parser.
:param parser: the parser instance to which we add arguments
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title='Base Deployment')
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
parser.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
|
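A rough sketch of wiring these mixins onto a plain `argparse.ArgumentParser`; the real Jina CLI assembles its parsers through its own factory, so treat this as illustrative.

import argparse

parser = argparse.ArgumentParser(description='pod/deployment runtime arguments')
mixin_base_ppr_parser(parser)  # adds the essential group plus deployment/polling args
args = parser.parse_args(['--name', 'my-deployment', '--timeout-ctrl', '30'])
print(args.name, args.timeout_ctrl, args.polling)  # my-deployment 30 ANY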
from docarray import BaseDoc
from docarray.typing import ID
def test_set_id():
class MyDocument(BaseDoc):
id: ID
d = MyDocument(id="123")
assert isinstance(d.id, ID)
assert d.id == "123"
|
from docarray import BaseDocument
from docarray.typing import ID
def test_set_id():
class MyDocument(BaseDocument):
id: ID
d = MyDocument(id="123")
assert isinstance(d.id, ID)
assert d.id == "123"
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.evaluator import DumpResults
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.registry import RUNNERS
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--out',
type=str,
help='dump predictions to a pickle file for offline evaluation')
parser.add_argument(
'--show', action='store_true', help='show prediction results')
parser.add_argument(
'--show-dir',
help='directory where painted images will be saved. '
'If specified, it will be automatically saved '
'to the work_dir/timestamp/show_dir')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# add `DumpResults` dummy metric
if args.out is not None:
assert args.out.endswith(('.pkl', '.pickle')), \
'The dump file must be a pkl file.'
runner.test_evaluator.metrics.append(
DumpResults(out_file_path=args.out))
# start testing
runner.test()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.evaluator import DumpResults
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.registry import RUNNERS
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--out',
type=str,
help='dump predictions to a pickle file for offline evaluation')
parser.add_argument(
'--show', action='store_true', help='show prediction results')
parser.add_argument(
'--show-dir',
help='directory where painted images will be saved. '
'If specified, it will be automatically saved '
'to the work_dir/timestamp/show_dir')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# add `DumpResults` dummy metric
if args.out is not None:
assert args.out.endswith(('.pkl', '.pickle')), \
'The dump file must be a pkl file.'
runner.test_evaluator.metrics.append(
DumpResults(out_file_path=args.out))
# start testing
runner.test()
if __name__ == '__main__':
main()
|
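For reference, a hedged sketch of driving this script programmatically by patching `sys.argv`; the config and checkpoint paths are placeholders, and the usual invocation is `python tools/test.py <config> <checkpoint>` from the MMDetection repo root.

import sys

# Hypothetical paths, for illustration only.
sys.argv = [
    'test.py',
    'configs/fcos/fcos_r50-caffe_fpn_gn-head_1x_coco.py',
    'work_dirs/fcos/epoch_12.pth',
    '--out', 'results.pkl',
]
main()  # parses the args above, builds the Runner and calls runner.test()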
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
import torch
from transformers import LlamaTokenizerFast, LlavaNextProcessor
from transformers.testing_utils import (
require_vision,
)
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import LlavaNextImageProcessor
@require_vision
class LlavaNextProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaNextProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaNextImageProcessor()
tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaNextProcessor(image_processor, tokenizer, **processor_kwargs)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
def get_tokenizer(self, **kwargs):
return LlavaNextProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return LlavaNextProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}",
"patch_size": 128,
"vision_feature_select_strategy": "default"
} # fmt: skip
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_image_token_filling(self):
processor = self.processor_class.from_pretrained(self.tmpdirname)
processor.patch_size = 14
processor.vision_feature_select_strategy = "default"
processor.image_processor.crop_size = {"height": 336, "width": 336}
processor.image_processor.size = {"shortest_edge": 336}
processor.image_processor.image_grid_pinpoints = [[672, 336]]
# Important to check with non square image
image = torch.randint(0, 2, (3, 503, 316))
expected_image_tokens = 1525
image_token_index = processor.image_token_id
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
inputs = processor(
text=[processor.apply_chat_template(messages)],
images=[image],
return_tensors="pt",
)
image_tokens = (inputs["input_ids"] == image_token_index).sum().item()
self.assertEqual(expected_image_tokens, image_tokens)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import tempfile
import unittest
import torch
from transformers import AutoProcessor, LlamaTokenizerFast, LlavaNextProcessor
from transformers.testing_utils import (
require_vision,
)
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import LlavaNextImageProcessor
@require_vision
class LlavaNextProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaNextProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaNextImageProcessor()
tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaNextProcessor(image_processor, tokenizer, **processor_kwargs)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
def get_tokenizer(self, **kwargs):
return LlavaNextProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return LlavaNextProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}",
"patch_size": 128,
"vision_feature_select_strategy": "default"
} # fmt: skip
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_image_token_filling(self):
processor = AutoProcessor.from_pretrained("llava-hf/llava-v1.6-vicuna-7b-hf")
processor.patch_size = 14
processor.vision_feature_select_strategy = "default"
# Important to check with non square image
image = torch.randint(0, 2, (3, 500, 316))
expected_image_tokens = 1526
image_token_index = 32000
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
inputs = processor(
text=[processor.apply_chat_template(messages)],
images=[image],
return_tensors="pt",
)
image_tokens = (inputs["input_ids"] == image_token_index).sum().item()
self.assertEqual(expected_image_tokens, image_tokens)
|
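A small sketch of what the chat template in `prepare_processor_dict` renders for a single user turn with one image; the load path is a placeholder for the directory written by `setUpClass`.

processor = LlavaNextProcessor.from_pretrained("<tmpdirname>")  # placeholder path
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What is shown in this image?"},
        ],
    },
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
# Renders roughly as: "USER: <image>\nWhat is shown in this image? ASSISTANT:"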
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
get_paths,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocList[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocList[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDoc):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes_',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
def test_get_paths():
paths = list(get_paths(patterns='*.py'))
for path in paths:
assert path.endswith('.py')
def test_get_paths_recursive():
paths_rec = list(get_paths(patterns='**', recursive=True))
paths_not_rec = list(get_paths(patterns='**', recursive=False))
assert len(paths_rec) > len(paths_not_rec)
def test_get_paths_exclude():
paths = list(get_paths(patterns='*.py'))
paths_wo_init = list(get_paths(patterns='*.py', exclude_regex='__init__.[a-z]*'))
assert len(paths_wo_init) <= len(paths)
assert '__init__.py' not in paths_wo_init
def test_shallow_copy():
from torch import rand
from docarray import BaseDoc
from docarray.helper import _shallow_copy_doc
from docarray.typing import TorchTensor, VideoUrl
class VideoDoc(BaseDoc):
url: VideoUrl
tensor_video: TorchTensor
class MyDoc(BaseDoc):
docs: DocList[VideoDoc]
tensor: TorchTensor
doc_ori = MyDoc(
docs=DocList[VideoDoc](
[
VideoDoc(
url=f'http://example.ai/videos/{i}',
tensor_video=rand(256),
)
for i in range(10)
]
),
tensor=rand(256),
)
doc_copy = _shallow_copy_doc(doc_ori)
assert doc_copy == doc_ori
|
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
get_paths,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocList[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocList[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDoc):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes_',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
def test_get_paths():
paths = list(get_paths(patterns='*.py'))
for path in paths:
assert path.endswith('.py')
def test_get_paths_recursive():
paths_rec = list(get_paths(patterns='**', recursive=True))
paths_not_rec = list(get_paths(patterns='**', recursive=False))
assert len(paths_rec) > len(paths_not_rec)
def test_get_paths_exclude():
paths = list(get_paths(patterns='*.py'))
paths_wo_init = list(get_paths(patterns='*.py', exclude_regex='__init__.[a-z]*'))
assert len(paths_wo_init) <= len(paths)
assert '__init__.py' not in paths_wo_init
|
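The two converters exercised above are inverses of each other; a quick round-trip sketch:

nested = {'img': {'url': 'a.png'}, 'title': 'hello'}
flat = _dict_to_access_paths(nested)
assert flat == {'img__url': 'a.png', 'title': 'hello'}
assert _access_path_dict_to_nested_dict(flat) == nested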
import asyncio
import datetime
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.toggl.dto import TogglTrackItem, TogglOutFormat
class TogglReader(BaseReader):
def __init__(
self, api_token: str, user_agent: str = "llama_index_toggl_reader"
) -> None:
"""Initialize with parameters."""
super().__init__()
self.api_token = api_token
self.user_agent = user_agent
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def load_data(
self,
workspace_id: str,
project_id: str,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = datetime.datetime.now(),
out_format: TogglOutFormat = TogglOutFormat.json,
) -> List[Document]:
"""
Load data from Toggl.
Args:
workspace_id (str): The workspace ID.
project_id (str): The project ID.
start_date (Optional[datetime.datetime]): The start date.
end_date (Optional[datetime.datetime]): The end date.
out_format (TogglOutFormat): The output format.
"""
return self.loop.run_until_complete(
self.aload_data(workspace_id, project_id, start_date, end_date, out_format)
)
async def aload_data(
self,
workspace_id: str,
project_id: str,
start_date: Optional[datetime.datetime],
end_date: Optional[datetime.datetime],
out_format: TogglOutFormat,
) -> List[Document]:
"""Load time entries from Toggl."""
from toggl.api_client import TogglClientApi
client = TogglClientApi(
{
"token": self.api_token,
"workspace_id": workspace_id,
"user_agent": self.user_agent,
}
)
project_times = client.get_project_times(project_id, start_date, end_date)
raw_items = [
TogglTrackItem.model_validate(raw_item)
for raw_item in project_times["data"]
]
items = []
for item in raw_items:
if out_format == TogglOutFormat.json:
text = item.model_dump_json()
elif out_format == TogglOutFormat.markdown:
text = f"""# {item.description}
**Start:** {item.start:%Y-%m-%d %H:%M:%S%z}
**End:** {item.end:%Y-%m-%d %H:%M:%S%z}
**Duration:** {self.milliseconds_to_postgresql_interval(item.dur)}
**Tags:** {",".join(item.tags)}
"""
doc = Document(text=text)
doc.metadata = {**doc.metadata, **item.dict()}
items.append(doc)
return items
def milliseconds_to_postgresql_interval(self, milliseconds):
seconds, milliseconds = divmod(milliseconds, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
interval = ""
if days > 0:
interval += f"{days}d"
if hours > 0:
interval += f"{hours}h"
if minutes > 0:
interval += f"{minutes}m"
if seconds > 0 or milliseconds > 0:
interval += f"{seconds}s"
if milliseconds > 0:
interval += f"{milliseconds}ms"
return interval
|
import asyncio
import datetime
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.toggl.dto import TogglTrackItem, TogglOutFormat
class TogglReader(BaseReader):
def __init__(
self, api_token: str, user_agent: str = "llama_index_toggl_reader"
) -> None:
"""Initialize with parameters."""
super().__init__()
self.api_token = api_token
self.user_agent = user_agent
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def load_data(
self,
workspace_id: str,
project_id: str,
start_date: Optional[datetime.datetime] = None,
end_date: Optional[datetime.datetime] = datetime.datetime.now(),
out_format: TogglOutFormat = TogglOutFormat.json,
) -> List[Document]:
"""Load data from Toggl.
Args:
workspace_id (str): The workspace ID.
project_id (str): The project ID.
start_date (Optional[datetime.datetime]): The start date.
end_date (Optional[datetime.datetime]): The end date.
out_format (TogglOutFormat): The output format.
"""
return self.loop.run_until_complete(
self.aload_data(workspace_id, project_id, start_date, end_date, out_format)
)
async def aload_data(
self,
workspace_id: str,
project_id: str,
start_date: Optional[datetime.datetime],
end_date: Optional[datetime.datetime],
out_format: TogglOutFormat,
) -> List[Document]:
"""Load time entries from Toggl."""
from toggl.api_client import TogglClientApi
client = TogglClientApi(
{
"token": self.api_token,
"workspace_id": workspace_id,
"user_agent": self.user_agent,
}
)
project_times = client.get_project_times(project_id, start_date, end_date)
raw_items = [
TogglTrackItem.model_validate(raw_item)
for raw_item in project_times["data"]
]
items = []
for item in raw_items:
if out_format == TogglOutFormat.json:
text = item.model_dump_json()
elif out_format == TogglOutFormat.markdown:
text = f"""# {item.description}
**Start:** {item.start:%Y-%m-%d %H:%M:%S%z}
**End:** {item.end:%Y-%m-%d %H:%M:%S%z}
**Duration:** {self.milliseconds_to_postgresql_interval(item.dur)}
**Tags:** {",".join(item.tags)}
"""
doc = Document(text=text)
doc.metadata = {**doc.metadata, **item.dict()}
items.append(doc)
return items
def milliseconds_to_postgresql_interval(self, milliseconds):
seconds, milliseconds = divmod(milliseconds, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
interval = ""
if days > 0:
interval += f"{days}d"
if hours > 0:
interval += f"{hours}h"
if minutes > 0:
interval += f"{minutes}m"
if seconds > 0 or milliseconds > 0:
interval += f"{seconds}s"
if milliseconds > 0:
interval += f"{milliseconds}ms"
return interval
|
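A hedged usage sketch of the reader above; the API token, workspace ID and project ID are placeholders.

reader = TogglReader(api_token="<TOGGL_API_TOKEN>")
docs = reader.load_data(
    workspace_id="<WORKSPACE_ID>",
    project_id="<PROJECT_ID>",
    start_date=datetime.datetime(2024, 1, 1),
    out_format=TogglOutFormat.markdown,
)
print(len(docs), docs[0].text[:80] if docs else "no entries")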
from pathlib import Path
import numpy as np
import paddlehub as hub
import pytest
from jina import Document, DocumentArray, Executor
from ...text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
@pytest.fixture(scope='function')
def parameters():
return {'traverse_paths': ['r'], 'batch_size': 10}
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_text_paddle(model, document_array, content, parameters):
ex = TextPaddleEncoder()
assert ex.on_gpu is False
ex.encode(document_array, parameters)
for doc in document_array:
assert isinstance(doc.embedding, np.ndarray)
assert doc.embedding.shape == (1024,)
embeds = model.get_embedding([[content]])
pooled_features = []
for embed in embeds:
pooled_feature, _ = embed
pooled_features.append(pooled_feature)
assert (pooled_features == document_array[0].embedding).all()
|
from pathlib import Path
import pytest
import numpy as np
import paddlehub as hub
from jina import Document, DocumentArray, Executor
from ...text_paddle import TextPaddleEncoder
@pytest.fixture(scope='function')
def model():
return hub.Module(name='ernie_tiny')
@pytest.fixture(scope='function')
def content():
return 'hello world'
@pytest.fixture(scope='function')
def document_array(content):
return DocumentArray([Document(content=content)])
@pytest.fixture(scope='function')
def parameters():
return {'traverse_paths': ['r'], 'batch_size': 10}
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_text_paddle(model, document_array, content, parameters):
ex = TextPaddleEncoder()
assert ex.on_gpu is False
ex.encode(document_array, parameters)
for doc in document_array:
assert isinstance(doc.embedding, np.ndarray)
assert doc.embedding.shape == (1024,)
embeds = model.get_embedding([[content]])
pooled_features = []
for embed in embeds:
pooled_feature, _ = embed
pooled_features.append(pooled_feature)
assert (pooled_features == document_array[0].embedding).all()
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
class ToTensor(Transform):
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
|
import warnings
from typing import Any, Dict, List, Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import Transform
from torchvision.transforms import functional as _F
from typing_extensions import Literal
from ._transform import _RandomApplyTransform
from .utils import is_simple_tensor, query_chw
class ToTensor(Transform):
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
# TODO: in other PR (?) undeprecate those and make them use _rgb_to_gray?
class Grayscale(Transform):
_transformed_types = (
datapoints.Image,
PIL.Image.Image,
is_simple_tensor,
datapoints.Video,
)
def __init__(self, num_output_channels: Literal[1, 3] = 1) -> None:
deprecation_msg = (
f"The transform `Grayscale(num_output_channels={num_output_channels})` "
f"is deprecated and will be removed in a future release."
)
if num_output_channels == 1:
replacement_msg = (
"transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY)"
)
else:
replacement_msg = (
"transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
")"
)
warnings.warn(f"{deprecation_msg} Instead, please use\n\n{replacement_msg}")
super().__init__()
self.num_output_channels = num_output_channels
def _transform(
self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
) -> Union[datapoints.ImageType, datapoints.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=self.num_output_channels)
if isinstance(inpt, (datapoints.Image, datapoints.Video)):
output = inpt.wrap_like(inpt, output) # type: ignore[arg-type]
return output
class RandomGrayscale(_RandomApplyTransform):
_transformed_types = (
datapoints.Image,
PIL.Image.Image,
is_simple_tensor,
datapoints.Video,
)
def __init__(self, p: float = 0.1) -> None:
warnings.warn(
"The transform `RandomGrayscale(p=...)` is deprecated and will be removed in a future release. "
"Instead, please use\n\n"
"transforms.RandomApply(\n"
" transforms.Compose(\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY),\n"
" transforms.ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB),\n"
" )\n"
" p=...,\n"
")"
)
super().__init__(p=p)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
num_input_channels, *_ = query_chw(flat_inputs)
return dict(num_input_channels=num_input_channels)
def _transform(
self, inpt: Union[datapoints.ImageType, datapoints.VideoType], params: Dict[str, Any]
) -> Union[datapoints.ImageType, datapoints.VideoType]:
output = _F.rgb_to_grayscale(inpt, num_output_channels=params["num_input_channels"])
if isinstance(inpt, (datapoints.Image, datapoints.Video)):
output = inpt.wrap_like(inpt, output) # type: ignore[arg-type]
return output
|
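A quick sketch of what the deprecated wrapper still does (it simply defers to `functional.to_tensor`), assuming a small PIL image:

import numpy as np
import PIL.Image

img = PIL.Image.fromarray(np.zeros((4, 4, 3), dtype=np.uint8))
tensor = ToTensor()(img)  # emits the deprecation warning above
print(tensor.shape, tensor.dtype)  # torch.Size([3, 4, 4]) torch.float32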
from .audio_clip_encoder import AudioCLIPEncoder
|
from .audio_clip_encoder import AudioCLIPEncoder
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
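A hedged sketch of launching training from this config with MMEngine directly; the config filename is a placeholder, and the usual entry point is `tools/train.py`.

from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile('configs/fcos/fcos_x101-64x4d_fpn_gn-head_ms-640-800-2x_coco.py')  # hypothetical path
cfg.work_dir = './work_dirs/fcos_x101_2x'
runner = Runner.from_cfg(cfg)
runner.train()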
import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(
f'{hostname}:{port}',
protocol=GatewayProtocolType.from_string(protocol),
)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.debug(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.debug(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.debug(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.debug(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
        # exit with code 1 (failure) if execution reaches this point
exit(1)
|
import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(
f'{hostname}:{port}',
protocol=GatewayProtocolType.from_string(protocol),
)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.info(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.info(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
        # exit with code 1 (failure) if execution reaches this point
exit(1)
|
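A rough sketch of driving the checker programmatically with a hand-built namespace; the attribute names mirror what the class reads from `args`, and the real entry point is the `jina ping` CLI.

import argparse

args = argparse.Namespace(
    target='flow',
    host='grpc://localhost:54321',
    attempts=3,
    timeout=3000,
    min_successful_attempts=1,
)
NetworkChecker(args)  # exits the process with 0 on success, 1 otherwise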
"""(Unofficial) Google Keep reader using gkeepapi."""
import json
import os
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GoogleKeepReader(BaseReader):
"""
Google Keep reader.
Reads notes from Google Keep
"""
def load_data(self, document_ids: List[str]) -> List[Document]:
"""
Load data from the document_ids.
Args:
document_ids (List[str]): a list of note ids.
"""
keep = self._get_keep()
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for note_id in document_ids:
note = keep.get(note_id)
if note is None:
raise ValueError(f"Note with id {note_id} not found.")
text = f"Title: {note.title}\nContent: {note.text}"
results.append(Document(text=text, extra_info={"note_id": note_id}))
return results
def load_all_notes(self) -> List[Document]:
"""Load all notes from Google Keep."""
keep = self._get_keep()
notes = keep.all()
results = []
for note in notes:
text = f"Title: {note.title}\nContent: {note.text}"
results.append(Document(text=text, extra_info={"note_id": note.id}))
return results
    def _get_keep(self) -> Any:
        """Get a Google Keep object with login."""
        import gkeepapi
# Read username and password from keep_credentials.json
if os.path.exists("keep_credentials.json"):
with open("keep_credentials.json") as f:
credentials = json.load(f)
else:
raise RuntimeError("Failed to load keep_credentials.json.")
keep = gkeepapi.Keep()
success = keep.login(credentials["username"], credentials["password"])
if not success:
raise RuntimeError("Failed to login to Google Keep.")
return keep
if __name__ == "__main__":
reader = GoogleKeepReader()
print(
reader.load_data(
document_ids=[
"1eKU7kGn8eJCErZ52OC7vCzHDSQaspFYGHHCiTX_IvhFOc7ZQZVJhTIDFMdTJOPiejOk"
]
)
)
|
"""(Unofficial) Google Keep reader using gkeepapi."""
import json
import os
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GoogleKeepReader(BaseReader):
"""Google Keep reader.
Reads notes from Google Keep
"""
def load_data(self, document_ids: List[str]) -> List[Document]:
"""Load data from the document_ids.
Args:
document_ids (List[str]): a list of note ids.
"""
keep = self._get_keep()
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for note_id in document_ids:
note = keep.get(note_id)
if note is None:
raise ValueError(f"Note with id {note_id} not found.")
text = f"Title: {note.title}\nContent: {note.text}"
results.append(Document(text=text, extra_info={"note_id": note_id}))
return results
def load_all_notes(self) -> List[Document]:
"""Load all notes from Google Keep."""
keep = self._get_keep()
notes = keep.all()
results = []
for note in notes:
text = f"Title: {note.title}\nContent: {note.text}"
results.append(Document(text=text, extra_info={"note_id": note.id}))
return results
    def _get_keep(self) -> Any:
        """Get a Google Keep object with login."""
        import gkeepapi
# Read username and password from keep_credentials.json
if os.path.exists("keep_credentials.json"):
with open("keep_credentials.json") as f:
credentials = json.load(f)
else:
raise RuntimeError("Failed to load keep_credentials.json.")
keep = gkeepapi.Keep()
success = keep.login(credentials["username"], credentials["password"])
if not success:
raise RuntimeError("Failed to login to Google Keep.")
return keep
if __name__ == "__main__":
reader = GoogleKeepReader()
print(
reader.load_data(
document_ids=[
"1eKU7kGn8eJCErZ52OC7vCzHDSQaspFYGHHCiTX_IvhFOc7ZQZVJhTIDFMdTJOPiejOk"
]
)
)
|
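A minimal sketch of the `keep_credentials.json` file the reader expects, plus a call that loads every note; the account values are placeholders, and gkeepapi typically requires an app password rather than the normal account password.

import json

# Hypothetical credentials, written next to the script because the reader reads a relative path.
with open("keep_credentials.json", "w") as f:
    json.dump({"username": "you@example.com", "password": "<app-password>"}, f)

reader = GoogleKeepReader()
docs = reader.load_all_notes()
print(f"loaded {len(docs)} notes")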
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from typing import Tuple
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
_, data_sample = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
data = {'inputs': frame_resize, 'data_sample': data_sample}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# register all modules in mmdet into the registries
register_all_modules()
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
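# Example invocation (sketch only; assumes this script is saved as
# video_gpuaccel_demo.py, and the video/config/checkpoint paths below are
# hypothetical placeholders for whatever model you want to run):
#   python video_gpuaccel_demo.py demo.mp4 \
#       configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       faster_rcnn_r50_fpn_1x_coco.pth --out result.mp4 --nvdecode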
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.utils import track_iter_progress
from mmdet.apis import init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
from typing import Tuple
try:
import ffmpegcv
except ImportError:
raise ImportError(
'Please install ffmpegcv with:\n\n pip install ffmpegcv')
def parse_args():
parser = argparse.ArgumentParser(
description='MMDetection video demo with GPU acceleration')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--nvdecode', action='store_true', help='Use NVIDIA decoder')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s), 0 is block')
args = parser.parse_args()
return args
def prefetch_batch_input_shape(model: nn.Module, ori_wh: Tuple[int,
int]) -> dict:
cfg = model.cfg
w, h = ori_wh
cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline)
data = {'img': np.zeros((h, w, 3), dtype=np.uint8), 'img_id': 0}
data = test_pipeline(data)
_, data_sample = model.data_preprocessor([data], False)
batch_input_shape = data_sample[0].batch_input_shape
return batch_input_shape
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[int, int],
ori_shape: Tuple[int, int]) -> dict:
assert frame_resize.shape[:2] == batch_input_shape
data_sample = DetDataSample()
data_sample.set_metainfo({
'img_shape':
batch_input_shape,
'ori_shape':
ori_shape,
'scale_factor': (batch_input_shape[0] / ori_shape[0],
batch_input_shape[1] / ori_shape[1])
})
frame_resize = torch.from_numpy(frame_resize).permute((2, 0, 1))
data = {'inputs': frame_resize, 'data_sample': data_sample}
return data
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# register all modules in mmdet into the registries
register_all_modules()
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
if args.nvdecode:
VideoCapture = ffmpegcv.VideoCaptureNV
else:
VideoCapture = ffmpegcv.VideoCapture
video_origin = VideoCapture(args.video)
batch_input_shape = prefetch_batch_input_shape(
model, (video_origin.width, video_origin.height))
ori_shape = (video_origin.height, video_origin.width)
resize_wh = batch_input_shape[::-1]
video_resize = VideoCapture(
args.video,
resize=resize_wh,
resize_keepratio=True,
resize_keepratioalign='topleft')
video_writer = None
if args.out:
video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps)
with torch.no_grad():
for i, (frame_resize, frame_origin) in enumerate(
zip(track_iter_progress(video_resize), video_origin)):
data = pack_data(frame_resize, batch_input_shape, ori_shape)
result = model.test_step([data])[0]
visualizer.add_datasample(
name='video',
image=frame_origin,
data_sample=result,
draw_gt=False,
show=False,
pred_score_thr=args.score_thr)
frame_mask = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame_mask, 'video', args.wait_time)
if args.out:
video_writer.write(frame_mask)
if video_writer:
video_writer.release()
video_origin.release()
video_resize.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from torch import Tensor
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (BaseBoxes, HorizontalBoxes, bbox2distance,
distance2bbox, get_box_tensor)
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
    This coder encodes gt bboxes (x1, y1, x2, y2) into distances to the four
    borders (left, top, right, bottom) and decodes them back to the original
    boxes.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border: Optional[bool] = True, **kwargs) -> None:
super().__init__(**kwargs)
self.clip_border = clip_border
def encode(self,
points: Tensor,
gt_bboxes: Union[Tensor, BaseBoxes],
max_dis: Optional[float] = None,
eps: float = 0.1) -> Tensor:
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format
is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): A small value that keeps the target strictly below
                ``max_dis`` (i.e. ``target < max_dis`` rather than ``<=``).
                Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
gt_bboxes = get_box_tensor(gt_bboxes)
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(
self,
points: Tensor,
pred_bboxes: Tensor,
max_shape: Optional[Union[Sequence[int], Tensor,
Sequence[Sequence[int]]]] = None
) -> Union[Tensor, BaseBoxes]:
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or
(B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
bboxes = distance2bbox(points, pred_bboxes, max_shape)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes)
return bboxes
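# A minimal, self-contained usage sketch with dummy tensors (demo only). It
# assumes `BaseBBoxCoder` defaults `use_box_type` to False, as in recent
# MMDetection releases.
if __name__ == '__main__':
    import torch
    coder = DistancePointBBoxCoder(clip_border=True)
    # Two sample points (x, y) and their ground-truth boxes in xyxy format.
    points = torch.tensor([[16., 16.], [32., 32.]])
    gt_bboxes = torch.tensor([[0., 0., 32., 32.], [16., 16., 64., 64.]])
    # Encode boxes as distances to the four borders, then decode them back.
    distances = coder.encode(points, gt_bboxes, max_dis=64)
    restored = coder.decode(points, distances, max_shape=(128, 128))
    print(distances)
    print(restored)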
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import (HorizontalBoxes, bbox2distance,
distance2bbox, get_box_tensor)
from .base_bbox_coder import BaseBBoxCoder
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
"""Distance Point BBox coder.
    This coder encodes gt bboxes (x1, y1, x2, y2) into distances to the four
    borders (left, top, right, bottom) and decodes them back to the original
    boxes.
Args:
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self, clip_border=True, **kwargs):
super().__init__(**kwargs)
self.clip_border = clip_border
def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
"""Encode bounding box to distances.
Args:
points (Tensor): Shape (N, 2), The format is [x, y].
gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4), The format
is "xyxy"
max_dis (float): Upper bound of the distance. Default None.
            eps (float): A small value that keeps the target strictly below
                ``max_dis`` (i.e. ``target < max_dis`` rather than ``<=``).
                Default 0.1.
Returns:
Tensor: Box transformation deltas. The shape is (N, 4).
"""
gt_bboxes = get_box_tensor(gt_bboxes)
assert points.size(0) == gt_bboxes.size(0)
assert points.size(-1) == 2
assert gt_bboxes.size(-1) == 4
return bbox2distance(points, gt_bboxes, max_dis, eps)
def decode(self, points, pred_bboxes, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (B, N, 2) or (N, 2).
pred_bboxes (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom). Shape (B, N, 4)
or (N, 4)
max_shape (Sequence[int] or torch.Tensor or Sequence[
Sequence[int]],optional): Maximum bounds for boxes, specifies
(H, W, C) or (H, W). If priors shape is (B, N, 4), then
the max_shape should be a Sequence[Sequence[int]],
and the length of max_shape should also be B.
Default None.
Returns:
Union[Tensor, :obj:`BaseBoxes`]: Boxes with shape (N, 4) or
(B, N, 4)
"""
assert points.size(0) == pred_bboxes.size(0)
assert points.size(-1) == 2
assert pred_bboxes.size(-1) == 4
if self.clip_border is False:
max_shape = None
bboxes = distance2bbox(points, pred_bboxes, max_shape)
if self.use_box_type:
bboxes = HorizontalBoxes(bboxes)
return bboxes
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEmbeddingSimilarityEvaluator,
SparseEncoder,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
print(dev_evaluator.primary_metric)
print(results[dev_evaluator.primary_metric])
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
from mmengine.fileio import load
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    # Detectron replaces BN with an affine channel layer.
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_s'])
bn_size = state_dict[torch_name + '.weight'].size()
state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
converted_names.add(caffe_name + '_b')
converted_names.add(caffe_name + '_s')
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
converted_names):
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_w'])
converted_names.add(caffe_name + '_w')
if caffe_name + '_b' in blobs:
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
converted_names.add(caffe_name + '_b')
def convert(src, dst, depth):
"""Convert keys in detectron pretrained ResNet models to pytorch style."""
# load arch_settings
if depth not in arch_settings:
raise ValueError('Only support ResNet-50 and ResNet-101 currently')
block_nums = arch_settings[depth]
# load caffe model
caffe_model = load(src, encoding='latin1')
blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
for i in range(1, len(block_nums) + 1):
for j in range(block_nums[i - 1]):
if j == 0:
convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
f'layer{i}.{j}.downsample.0', converted_names)
convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
f'layer{i}.{j}.downsample.1', converted_names)
for k, letter in enumerate(['a', 'b', 'c']):
convert_conv_fc(blobs, state_dict,
f'res{i + 1}_{j}_branch2{letter}',
f'layer{i}.{j}.conv{k+1}', converted_names)
convert_bn(blobs, state_dict,
f'res{i + 1}_{j}_branch2{letter}_bn',
f'layer{i}.{j}.bn{k + 1}', converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
            print(f'Not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src detectron model path')
parser.add_argument('dst', help='save path')
parser.add_argument('depth', type=int, help='ResNet model depth')
args = parser.parse_args()
convert(args.src, args.dst, args.depth)
if __name__ == '__main__':
main()
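# Example invocation (sketch; assumes this script is saved as
# detectron2pytorch.py, and both file names below are hypothetical):
#   python detectron2pytorch.py R-50.pkl resnet50_from_detectron.pth 50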
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import mmcv
import torch
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    # Detectron replaces BN with an affine channel layer.
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_s'])
bn_size = state_dict[torch_name + '.weight'].size()
state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
converted_names.add(caffe_name + '_b')
converted_names.add(caffe_name + '_s')
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
converted_names):
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_w'])
converted_names.add(caffe_name + '_w')
if caffe_name + '_b' in blobs:
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
converted_names.add(caffe_name + '_b')
def convert(src, dst, depth):
"""Convert keys in detectron pretrained ResNet models to pytorch style."""
# load arch_settings
if depth not in arch_settings:
raise ValueError('Only support ResNet-50 and ResNet-101 currently')
block_nums = arch_settings[depth]
# load caffe model
caffe_model = mmcv.load(src, encoding='latin1')
blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
for i in range(1, len(block_nums) + 1):
for j in range(block_nums[i - 1]):
if j == 0:
convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
f'layer{i}.{j}.downsample.0', converted_names)
convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
f'layer{i}.{j}.downsample.1', converted_names)
for k, letter in enumerate(['a', 'b', 'c']):
convert_conv_fc(blobs, state_dict,
f'res{i + 1}_{j}_branch2{letter}',
f'layer{i}.{j}.conv{k+1}', converted_names)
convert_bn(blobs, state_dict,
f'res{i + 1}_{j}_branch2{letter}_bn',
f'layer{i}.{j}.bn{k + 1}', converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
            print(f'Not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src detectron model path')
parser.add_argument('dst', help='save path')
parser.add_argument('depth', type=int, help='ResNet model depth')
args = parser.parse_args()
convert(args.src, args.dst, args.depth)
if __name__ == '__main__':
main()
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(pad_size_divisor=64),
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)))
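# Training sketch (assumes the standard MMDetection tools/ layout and a
# hypothetical file name for this config under configs/carafe/):
#   python tools/train.py configs/carafe/faster-rcnn_r50_fpn-carafe_1x_coco.py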
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
neck=dict(
type='FPN_CARAFE',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1,
compressed_channels=64)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
:param path_vectorizer: path of the pre-trained tfidf sklearn vectorizer
:param default_batch_size: Default batch size, used if ``batch_size`` is not
provided as a parameter in the request
:param default_traversal_paths: Default traversal paths, used if ``traversal_paths``
are not provided as a parameter in the request.
"""
def __init__(
self,
path_vectorizer: Optional[str] = None,
default_batch_size: int = 2048,
default_traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
if path_vectorizer is None:
path_vectorizer = str(
Path(__file__).parent / 'model/tfidf_vectorizer.pickle'
)
self.path_vectorizer = path_vectorizer
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
        if os.path.exists(self.path_vectorizer):
            with open(self.path_vectorizer, 'rb') as f:
                self.tfidf_vectorizer = pickle.load(f)
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):
"""
Generate the TF-IDF feature vector for all text documents.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
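# A minimal usage sketch (comment-only): it assumes a fitted sklearn
# TfidfVectorizer has been pickled at the default
# model/tfidf_vectorizer.pickle path next to this file.
#   from jina import Document, DocumentArray
#   encoder = TFIDFTextEncoder()
#   docs = DocumentArray([Document(text='hello world')])
#   encoder.encode(docs, parameters={})
#   print(docs[0].embedding)  # sparse row vector produced by the vectorizer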
|
import os
import pickle
from typing import Optional, Iterable, Tuple
from jina import Executor, requests, DocumentArray
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
:param path_vectorizer: path of the pre-trained tfidf sklearn vectorizer
    :param default_traversal_paths: fallback traversal paths used when no traversal paths are sent in the request
    :param default_batch_size: fallback batch size used when no batch size is sent in the request
"""
def __init__(
self,
path_vectorizer: str = 'model/tfidf_vectorizer.pickle',
default_batch_size: int = 2048,
default_traversal_paths: Tuple[str] = ('r', ),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.path_vectorizer = path_vectorizer
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
        if os.path.exists(self.path_vectorizer):
            with open(self.path_vectorizer, 'rb') as f:
                self.tfidf_vectorizer = pickle.load(f)
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs):
"""
Generate the TF-IDF feature vector and store it in `doc.embedding` for each `doc` in `docs`.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.default_traversal_paths),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text'
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
"""Update the documents with the embeddings generated by a tfidf"""
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocArrayStacked
from docarray.array.stacked.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocArrayStacked[MyDoc](docs)._storage
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
from cupy import * # noqa: F403
# from cupy import * doesn't overwrite these builtin names
from cupy import abs, max, min, round # noqa: F401
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')
__import__(__package__ + '.fft')
__array_api_version__ = '2024.12'
|
from cupy import * # noqa: F403
# from cupy import * doesn't overwrite these builtin names
from cupy import abs, max, min, round # noqa: F401
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')
__import__(__package__ + '.fft')
from ..common._helpers import * # noqa: F401,F403
__array_api_version__ = '2024.12'
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self,
port: int = None,
arg1: str = None,
arg2: str = None,
arg3: str = 'default-arg3',
**kwargs
):
super().__init__(**kwargs)
self.port = port
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self,
port: int = None,
arg1: str = None,
arg2: str = None,
arg3: str = 'default-arg3',
**kwargs
):
super().__init__(**kwargs)
self.port = port
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
@property
def should_exit(self) -> bool:
return self.server.should_exit
|
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import (
BatchHardTripletLoss,
BatchHardTripletLossDistanceFunction,
)
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CachedMultipleNegativesSymmetricRankingLoss import (
CachedMultipleNegativesSymmetricRankingLoss,
)
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .DistillKLDivLoss import DistillKLDivLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"CachedMultipleNegativesSymmetricRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
"DistillKLDivLoss",
]
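# Typical usage sketch (comment-only; constructing the model downloads weights,
# so it is not run here). Any of the losses above are built around a
# SentenceTransformer model and passed to the training loop:
#   from sentence_transformers import SentenceTransformer, losses
#   model = SentenceTransformer("all-MiniLM-L6-v2")
#   loss = losses.MultipleNegativesRankingLoss(model)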
|
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CachedMultipleNegativesSymmetricRankingLoss import CachedMultipleNegativesSymmetricRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"CachedMultipleNegativesSymmetricRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='TEXT', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='integer', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for i in range(len(columns)):
columns[i] = (
columns[i][0],
self._map_type(columns[i][1]),
)
return columns
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from docarray import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
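# A minimal usage sketch (comment-only; requires the `annlite` package). This
# backend is normally selected through the DocumentArray factory rather than
# instantiated directly:
#   import numpy as np
#   from docarray import Document, DocumentArray
#   da = DocumentArray(storage='annlite', config={'n_dim': 128, 'metric': 'cosine'})
#   da.append(Document(embedding=np.random.random(128).astype('float32')))
#   print(len(da))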
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from ..base.backend import BaseBackendMixin, TypeMap
from ....helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='TEXT', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='integer', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for i in range(len(columns)):
columns[i] = (
columns[i][0],
self._map_type(columns[i][1]),
)
return columns
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool as average_pool
from keras.src.ops.nn import batch_normalization as batch_normalization
from keras.src.ops.nn import binary_crossentropy as binary_crossentropy
from keras.src.ops.nn import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.ops.nn import celu as celu
from keras.src.ops.nn import conv as conv
from keras.src.ops.nn import conv_transpose as conv_transpose
from keras.src.ops.nn import ctc_decode as ctc_decode
from keras.src.ops.nn import ctc_loss as ctc_loss
from keras.src.ops.nn import depthwise_conv as depthwise_conv
from keras.src.ops.nn import dot_product_attention as dot_product_attention
from keras.src.ops.nn import elu as elu
from keras.src.ops.nn import gelu as gelu
from keras.src.ops.nn import glu as glu
from keras.src.ops.nn import hard_shrink as hard_shrink
from keras.src.ops.nn import hard_sigmoid as hard_sigmoid
from keras.src.ops.nn import hard_silu as hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh as hard_tanh
from keras.src.ops.nn import leaky_relu as leaky_relu
from keras.src.ops.nn import log_sigmoid as log_sigmoid
from keras.src.ops.nn import log_softmax as log_softmax
from keras.src.ops.nn import max_pool as max_pool
from keras.src.ops.nn import moments as moments
from keras.src.ops.nn import multi_hot as multi_hot
from keras.src.ops.nn import normalize as normalize
from keras.src.ops.nn import one_hot as one_hot
from keras.src.ops.nn import polar as polar
from keras.src.ops.nn import psnr as psnr
from keras.src.ops.nn import relu as relu
from keras.src.ops.nn import relu6 as relu6
from keras.src.ops.nn import rms_normalization as rms_normalization
from keras.src.ops.nn import selu as selu
from keras.src.ops.nn import separable_conv as separable_conv
from keras.src.ops.nn import sigmoid as sigmoid
from keras.src.ops.nn import silu as silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink as soft_shrink
from keras.src.ops.nn import softmax as softmax
from keras.src.ops.nn import softplus as softplus
from keras.src.ops.nn import softsign as softsign
from keras.src.ops.nn import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.ops.nn import sparse_plus as sparse_plus
from keras.src.ops.nn import sparse_sigmoid as sparse_sigmoid
from keras.src.ops.nn import sparsemax as sparsemax
from keras.src.ops.nn import squareplus as squareplus
from keras.src.ops.nn import tanh_shrink as tanh_shrink
from keras.src.ops.nn import threshold as threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import rms_normalization
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparse_sigmoid
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
"""This module is deprecated and will be removed in a future release.
Please use LangChainTracer instead.
"""
from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any: # noqa: ARG001
"""Throw an error because this has been replaced by get_headers."""
msg = (
"get_headers for LangChainTracerV1 is no longer supported. "
"Please use LangChainTracer instead."
)
raise RuntimeError(msg)
def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any: # noqa: N802,ARG001
"""Throw an error because this has been replaced by LangChainTracer."""
msg = (
"LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
)
raise RuntimeError(msg)
|
"""This module is deprecated and will be removed in a future release.
Please use LangChainTracer instead.
"""
from typing import Any
def get_headers(*args: Any, **kwargs: Any) -> Any:
"""Throw an error because this has been replaced by get_headers."""
msg = (
"get_headers for LangChainTracerV1 is no longer supported. "
"Please use LangChainTracer instead."
)
raise RuntimeError(msg)
def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any: # noqa: N802
"""Throw an error because this has been replaced by LangChainTracer."""
msg = (
"LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
)
raise RuntimeError(msg)
|
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config):
old_environ = dict(os.environ)
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file_content):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=text_file_content.encode(),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
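# Sketch of how a test might consume the `temporary_repo` fixture above
# (hypothetical test function, not part of this conftest):
#   def test_push_dataset(temporary_repo, hf_api, hf_token):
#       with temporary_repo() as repo_id:
#           hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
#           assert hf_api.repo_info(repo_id, repo_type="dataset", token=hf_token) is not None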
|
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "DSUser"
CI_HUB_USER_FULL_NAME = "Dummy Datasets User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config):
old_environ = dict(os.environ)
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file_content):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=text_file_content.encode(),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
_base_ = './mask-rcnn_r101_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './mask_rcnn_r101_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ElasticKnnSearch, ElasticVectorSearch
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ElasticVectorSearch": "langchain_community.vectorstores",
"ElasticKnnSearch": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ElasticKnnSearch",
"ElasticVectorSearch",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ElasticKnnSearch, ElasticVectorSearch
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ElasticVectorSearch": "langchain_community.vectorstores",
"ElasticKnnSearch": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ElasticVectorSearch",
"ElasticKnnSearch",
]
|
from abc import ABC, abstractmethod
from langchain_core.runnables.config import run_in_executor
from pydantic import BaseModel, Field
class SparseVector(BaseModel, extra="forbid"):
"""
Sparse vector structure
"""
indices: list[int] = Field(..., description="indices must be unique")
values: list[float] = Field(
..., description="values and indices must be the same length"
)
class SparseEmbeddings(ABC):
"""An interface for sparse embedding models to use with Qdrant."""
@abstractmethod
def embed_documents(self, texts: list[str]) -> list[SparseVector]:
"""Embed search docs."""
@abstractmethod
def embed_query(self, text: str) -> SparseVector:
"""Embed query text."""
async def aembed_documents(self, texts: list[str]) -> list[SparseVector]:
"""Asynchronous Embed search docs."""
return await run_in_executor(None, self.embed_documents, texts)
async def aembed_query(self, text: str) -> SparseVector:
"""Asynchronous Embed query text."""
return await run_in_executor(None, self.embed_query, text)
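# A minimal sketch of a concrete SparseEmbeddings implementation; the hashed
# bag-of-words scheme below is an illustrative toy, not a real sparse encoder.
class HashedBowSparseEmbeddings(SparseEmbeddings):
    """Toy sparse embeddings that hash whitespace tokens into a fixed-size index space."""
    def __init__(self, dim: int = 1024) -> None:
        self.dim = dim
    def embed_documents(self, texts: list[str]) -> list[SparseVector]:
        return [self.embed_query(text) for text in texts]
    def embed_query(self, text: str) -> SparseVector:
        counts: dict[int, float] = {}
        for token in text.lower().split():
            index = hash(token) % self.dim
            counts[index] = counts.get(index, 0.0) + 1.0
        return SparseVector(indices=list(counts), values=list(counts.values()))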
|
from abc import ABC, abstractmethod
from typing import List
from langchain_core.runnables.config import run_in_executor
from pydantic import BaseModel, Field
class SparseVector(BaseModel, extra="forbid"):
"""
Sparse vector structure
"""
indices: List[int] = Field(..., description="indices must be unique")
values: List[float] = Field(
..., description="values and indices must be the same length"
)
class SparseEmbeddings(ABC):
"""An interface for sparse embedding models to use with Qdrant."""
@abstractmethod
def embed_documents(self, texts: List[str]) -> List[SparseVector]:
"""Embed search docs."""
@abstractmethod
def embed_query(self, text: str) -> SparseVector:
"""Embed query text."""
async def aembed_documents(self, texts: List[str]) -> List[SparseVector]:
"""Asynchronous Embed search docs."""
return await run_in_executor(None, self.embed_documents, texts)
async def aembed_query(self, text: str) -> SparseVector:
"""Asynchronous Embed query text."""
return await run_in_executor(None, self.embed_query, text)
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='Darknet',
depth=53,
out_indices=(3, 4, 5),
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[1024, 512, 256],
out_channels=[512, 256, 128]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[512, 256, 128],
out_channels=[1024, 512, 256],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(608, 608),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000, # same as burn-in in darknet
warmup_ratio=0.1,
step=[218, 246])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=273)
evaluation = dict(interval=1, metric=['bbox'])
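# A minimal sketch of loading and inspecting this config programmatically; the file
# path is a placeholder for wherever this config is saved, and `from mmcv import Config`
# assumes the mmcv 1.x / mmdetection 2.x environment this config style belongs to.
# The guard keeps the snippet inert when the config itself is read via `Config.fromfile`.
if __name__ == '__main__':
    from mmcv import Config
    cfg = Config.fromfile('configs/yolo/yolov3_d53_mstrain-608_273e_coco.py')  # placeholder path
    print(cfg.model.bbox_head.num_classes)  # 80
    print(cfg.data.samples_per_gpu)  # 8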
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='Darknet',
depth=53,
out_indices=(3, 4, 5),
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[1024, 512, 256],
out_channels=[512, 256, 128]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[512, 256, 128],
out_channels=[1024, 512, 256],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='PhotoMetricDistortion'),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(608, 608),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000, # same as burn-in in darknet
warmup_ratio=0.1,
step=[218, 246])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=273)
evaluation = dict(interval=1, metric=['bbox'])
|
from langchain_core.embeddings import Embeddings
from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests
from langchain_openai import AzureOpenAIEmbeddings
class TestAzureOpenAIStandard(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return AzureOpenAIEmbeddings
@property
def embedding_model_params(self) -> dict:
return {"api_key": "api_key", "azure_endpoint": "https://endpoint.com"}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"AZURE_OPENAI_API_KEY": "api_key",
"AZURE_OPENAI_ENDPOINT": "https://endpoint.com",
"AZURE_OPENAI_AD_TOKEN": "token",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_VERSION": "yyyy-mm-dd",
"OPENAI_API_TYPE": "type",
},
{},
{
"openai_api_key": "api_key",
"azure_endpoint": "https://endpoint.com",
"azure_ad_token": "token",
"openai_organization": "org_id",
"openai_api_version": "yyyy-mm-dd",
"openai_api_type": "type",
},
)
|
from typing import Tuple, Type
from langchain_core.embeddings import Embeddings
from langchain_tests.unit_tests.embeddings import EmbeddingsUnitTests
from langchain_openai import AzureOpenAIEmbeddings
class TestAzureOpenAIStandard(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> Type[Embeddings]:
return AzureOpenAIEmbeddings
@property
def embedding_model_params(self) -> dict:
return {"api_key": "api_key", "azure_endpoint": "https://endpoint.com"}
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"AZURE_OPENAI_API_KEY": "api_key",
"AZURE_OPENAI_ENDPOINT": "https://endpoint.com",
"AZURE_OPENAI_AD_TOKEN": "token",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_VERSION": "yyyy-mm-dd",
"OPENAI_API_TYPE": "type",
},
{},
{
"openai_api_key": "api_key",
"azure_endpoint": "https://endpoint.com",
"azure_ad_token": "token",
"openai_organization": "org_id",
"openai_api_version": "yyyy-mm-dd",
"openai_api_type": "type",
},
)
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When used from Python, the following values are additionally supported:
        - a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
        The customized Python modules that need to be imported before loading the Executor.
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
gp.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
        help='The port for input data to bind to; defaults to a random port in the range [49152, 65535]',
)
gp.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for binding to, by default it is {__default_host__}',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
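# A minimal sketch of wiring the mixin into a fresh argparse parser; the values
# printed are the parser defaults and depend on the local jina installation.
if __name__ == '__main__':
    import argparse
    demo_parser = argparse.ArgumentParser(description='WorkerRuntime demo parser')
    mixin_worker_runtime_parser(demo_parser)
    demo_args = demo_parser.parse_args([])
    print(demo_args.uses, demo_args.port, demo_args.native)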
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction, add_arg_group
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When used from Python, the following values are additionally supported:
        - a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
        The customized Python modules that need to be imported before loading the Executor.
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
gp.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
        help='The port for input data to bind to; defaults to a random port in the range [49152, 65535]',
)
gp.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for binding to, by default it is {__default_host__}',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--grpc-server-options',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help="Dictionary of kwargs arguments that will be passed to the grpc server as options when starting the server, example : {'grpc.max_send_message_length': -1}",
default=None,
)
|
"""Google Search tool spec."""
import json
import httpx
import urllib.parse
from typing import Dict, List, Optional, Union
from llama_index.core.tools.tool_spec.base import BaseToolSpec
QUERY_URL_TMPL = (
"https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}"
)
class GoogleSearchToolSpec(BaseToolSpec):
"""Google Search tool spec."""
spec_functions = [("google_search", "agoogle_search")]
def __init__(self, key: str, engine: str, num: Optional[int] = None) -> None:
"""Initialize with parameters."""
self.key = key
self.engine = engine
self.num = num
def _get_url(self, query: str) -> str:
url = QUERY_URL_TMPL.format(
key=self.key, engine=self.engine, query=urllib.parse.quote_plus(query)
)
if self.num is not None:
if not 1 <= self.num <= 10:
raise ValueError("num should be an integer between 1 and 10, inclusive")
url += f"&num={self.num}"
return url
def _parse_results(self, results: List[Dict]) -> Union[List[Dict], str]:
cleaned_results = []
if len(results) == 0:
return "No search results available"
for result in results:
if "snippet" in result:
cleaned_results.append(
{
"title": result["title"],
"link": result["link"],
"snippet": result["snippet"],
}
)
return cleaned_results
def google_search(self, query: str):
"""
Make a query to the Google search engine to receive a list of results.
Args:
query (str): The query to be passed to Google search.
        Raises:
            ValueError: If the ``num`` value set at initialization is not an
                integer between 1 and 10, inclusive.
"""
url = self._get_url(query)
with httpx.Client() as client:
response = client.get(url)
results = json.loads(response.text).get("items", [])
return self._parse_results(results)
async def agoogle_search(self, query: str):
"""
Make a query to the Google search engine to receive a list of results.
Args:
query (str): The query to be passed to Google search.
        Raises:
            ValueError: If the ``num`` value set at initialization is not an
                integer between 1 and 10, inclusive.
"""
url = self._get_url(query)
async with httpx.AsyncClient() as client:
response = await client.get(url)
results = json.loads(response.text).get("items", [])
return self._parse_results(results)
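# A minimal sketch of using the tool spec; the key and engine values are
# placeholders, and the commented-out call would perform a real network request.
if __name__ == "__main__":
    tool_spec = GoogleSearchToolSpec(key="<api-key>", engine="<engine-id>", num=3)
    print(tool_spec._get_url("llama index"))
    # print(tool_spec.google_search("llama index"))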
|
"""Google Search tool spec."""
import json
import urllib.parse
from typing import Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
QUERY_URL_TMPL = (
"https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}"
)
class GoogleSearchToolSpec(BaseToolSpec):
"""Google Search tool spec."""
spec_functions = ["google_search"]
def __init__(self, key: str, engine: str, num: Optional[int] = None) -> None:
"""Initialize with parameters."""
self.key = key
self.engine = engine
self.num = num
def google_search(self, query: str):
"""
Make a query to the Google search engine to receive a list of results.
Args:
query (str): The query to be passed to Google search.
        Raises:
            ValueError: If the ``num`` value set at initialization is not an
                integer between 1 and 10, inclusive.
"""
url = QUERY_URL_TMPL.format(
key=self.key, engine=self.engine, query=urllib.parse.quote_plus(query)
)
if self.num is not None:
if not 1 <= self.num <= 10:
raise ValueError("num should be an integer between 1 and 10, inclusive")
url += f"&num={self.num}"
response = requests.get(url)
results = json.loads(response.text).get("items", [])
documents = []
if len(results) == 0:
return "No search results available"
for result in results:
if "snippet" in result:
documents.append(
Document(
text=result["snippet"],
metadata={"title": result["title"], "link": result["link"]},
)
)
return documents
|
""" Helper module to manage torch vision models """
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
from torchvision.models.alexnet import __all__ as all_alexnet_models
from torchvision.models.densenet import __all__ as all_densenet_models
from torchvision.models.googlenet import __all__ as all_googlenet_models
from torchvision.models.mnasnet import __all__ as all_mnasnet_models
from torchvision.models.mobilenet import __all__ as all_mobilenet_models
from torchvision.models.resnet import __all__ as all_resnet_models
from torchvision.models.squeezenet import __all__ as all_squeezenet_models
from torchvision.models.vgg import __all__ as all_vgg_models
class EmbeddingModelWrapper:
"""
    The ``EmbeddingModelWrapper`` acts as a unified interface to the `torchvision` models.
It hides the model specific logic for computing embeddings from the user.
    :param model_name: Name of the `torchvision` model. Class names are not allowed, e.g.
        use `resnet18` instead of `ResNet`.
:param device: Which device the model runs on. Can be 'cpu' or 'cuda'
"""
def __init__(self, model_name: str, device: Optional[str] = None):
if not device:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
self._layer_name = _ModelCatalogue.get_layer_name(model_name)
self._model = getattr(models, model_name)(pretrained=True)
self._model.to(torch.device(self.device))
self._pooling_layer = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self._pooling_layer.to(torch.device(self.device))
def _pooling_function(self, tensor_in: 'torch.Tensor') -> 'torch.Tensor':
return torch.flatten(self._pooling_layer(tensor_in), 1)
def get_features(self, content: 'torch.Tensor') -> 'torch.Tensor':
feature_map = None
def get_activations(model, model_input, output):
nonlocal feature_map
feature_map = output.detach()
layer = getattr(self._model, self._layer_name)
handle = layer.register_forward_hook(get_activations)
self._model(content)
handle.remove()
return feature_map
def compute_embeddings(self, images: 'np.ndarray') -> 'np.ndarray':
tensor = torch.from_numpy(images).to(self.device)
features = self.get_features(tensor)
features = self._pooling_function(features)
features = features.detach().cpu().numpy()
return features
class _ModelCatalogue:
    # Maps the tuple of available model names to the layer from which we want to
    # extract the embedding. The first entry of each ``__all__`` list is dropped
    # because it is the model class, not a factory method.
all_supported_models_to_layer_mapping = {
tuple(all_resnet_models[1:]): 'layer4',
tuple(all_alexnet_models[1:]): 'features',
tuple(all_vgg_models[1:]): 'features',
tuple(all_squeezenet_models[1:]): 'features',
tuple(all_densenet_models[1:]): 'features',
tuple(all_mnasnet_models[1:]): 'layers',
tuple(all_mobilenet_models[1:]): 'features',
tuple(all_googlenet_models[1:]): 'inception5b',
}
@classmethod
def is_model_supported(cls, model_name: str):
        return any(model_name in m for m in cls.all_supported_models_to_layer_mapping)
@classmethod
def get_layer_name(cls, model_name: str) -> str:
"""
        Checks whether the model is supported and returns the corresponding layer name.
        :param model_name: Name of the model
"""
if not cls.is_model_supported(model_name):
raise ValueError(
f'Model with name {model_name} is not supported. '
f'Supported models are: {cls.all_supported_models_to_layer_mapping.keys()}'
)
for (
model_names,
layer_name,
) in cls.all_supported_models_to_layer_mapping.items():
if model_name in model_names:
return layer_name
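# A minimal sketch of computing embeddings for a random batch; downloading the
# pretrained resnet18 weights requires network access, and the (3, 224, 224)
# input shape is just the common torchvision default rather than a requirement.
if __name__ == '__main__':
    wrapper = EmbeddingModelWrapper('resnet18', device='cpu')
    images = np.random.rand(2, 3, 224, 224).astype('float32')
    embeddings = wrapper.compute_embeddings(images)
    print(embeddings.shape)  # (2, 512) for resnet18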
|
""" Helper module to manage torch vision models """
from typing import Optional
import torch
import torchvision.models as models
import torch.nn as nn
import numpy as np
from torchvision.models.resnet import __all__ as all_resnet_models
from torchvision.models.alexnet import __all__ as all_alexnet_models
from torchvision.models.vgg import __all__ as all_vgg_models
from torchvision.models.squeezenet import __all__ as all_squeezenet_models
from torchvision.models.densenet import __all__ as all_densenet_models
from torchvision.models.mnasnet import __all__ as all_mnasnet_models
from torchvision.models.mobilenet import __all__ as all_mobilenet_models
from torchvision.models.googlenet import __all__ as all_googlenet_models
class EmbeddingModelWrapper:
"""
    The ``EmbeddingModelWrapper`` acts as a unified interface to the `torchvision` models.
It hides the model specific logic for computing embeddings from the user.
    :param model_name: Name of the `torchvision` model. Class names are not allowed, e.g.
        use `resnet18` instead of `ResNet`.
:param device: Which device the model runs on. Can be 'cpu' or 'cuda'
"""
def __init__(self, model_name: str, device: Optional[str] = None):
if not device:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self._layer_name = _ModelCatalogue.get_layer_name(model_name)
self._model = getattr(models, model_name)(pretrained=True)
self.device = device
self._pooling_layer = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self._pooling_layer.to(torch.device(self.device))
def _pooling_function(self, tensor_in: 'torch.Tensor') -> 'torch.Tensor':
return torch.flatten(self._pooling_layer(tensor_in), 1)
def get_features(self, content: 'torch.Tensor') -> 'torch.Tensor':
feature_map = None
def get_activations(model, model_input, output):
nonlocal feature_map
feature_map = output.detach()
layer = getattr(self._model, self._layer_name)
handle = layer.register_forward_hook(get_activations)
self._model(content)
handle.remove()
return feature_map
def compute_embeddings(self, images: 'np.ndarray') -> 'np.ndarray':
tensor = torch.from_numpy(images).to(self.device)
features = self.get_features(tensor)
features = self._pooling_function(features)
features = features.detach().numpy()
return features
class _ModelCatalogue:
    # Maps the tuple of available model names to the layer from which we want to
    # extract the embedding. The first entry of each ``__all__`` list is dropped
    # because it is the model class, not a factory method.
all_supported_models_to_layer_mapping = {
tuple(all_resnet_models[1:]): 'layer4',
tuple(all_alexnet_models[1:]): 'features',
tuple(all_vgg_models[1:]): 'features',
tuple(all_squeezenet_models[1:]): 'features',
tuple(all_densenet_models[1:]): 'features',
tuple(all_mnasnet_models[1:]): 'layers',
tuple(all_mobilenet_models[1:]): 'features',
tuple(all_googlenet_models[1:]): 'inception5b',
}
@classmethod
def is_model_supported(cls, model_name: str):
        return any(model_name in m for m in cls.all_supported_models_to_layer_mapping)
@classmethod
def get_layer_name(cls, model_name: str) -> str:
"""
        Checks whether the model is supported and returns the corresponding layer name.
        :param model_name: Name of the model
"""
if not cls.is_model_supported(model_name):
raise ValueError(f'Model with name {model_name} is not supported. '
f'Supported models are: {cls.all_supported_models_to_layer_mapping.keys()}')
for model_names, layer_name in cls.all_supported_models_to_layer_mapping.items():
if model_name in model_names:
return layer_name
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor into original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = mmcv.slice_list(polys_single, polys_lens_single)
mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to a more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list): bitmap mask results.
Returns:
list | tuple: RLE encoded mask.
"""
encoded_mask_results = []
for mask in mask_results:
encoded_mask_results.append(
mask_util.encode(
np.array(mask[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
return encoded_mask_results
def mask2bbox(masks):
"""Obtain tight bounding boxes of binary masks.
Args:
masks (Tensor): Binary mask of shape (n, h, w).
Returns:
        Tensor: Bboxes with shape (n, 4) of the \
            positive regions in the binary masks.
"""
N = masks.shape[0]
bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
x_any = torch.any(masks, dim=1)
y_any = torch.any(masks, dim=2)
for i in range(N):
x = torch.where(x_any[i, :])[0]
y = torch.where(y_any[i, :])[0]
if len(x) > 0 and len(y) > 0:
bboxes[i, :] = bboxes.new_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1])
return bboxes
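# A minimal sketch of mask2bbox on a toy batch; the expected box follows the
# (x1, y1, x2, y2) convention produced by the function above.
if __name__ == '__main__':
    toy_masks = torch.zeros((1, 8, 8), dtype=torch.bool)
    toy_masks[0, 2:5, 3:7] = True  # rows 2-4 (y), columns 3-6 (x)
    print(mask2bbox(toy_masks))  # tensor([[3., 2., 7., 5.]])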
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor into original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = mmcv.slice_list(polys_single, polys_lens_single)
mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to a more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list | tuple[list]): bitmap mask results.
In mask scoring rcnn, mask_results is a tuple of (segm_results,
segm_cls_score).
Returns:
list | tuple: RLE encoded mask.
"""
if isinstance(mask_results, tuple): # mask scoring
cls_segms, cls_mask_scores = mask_results
else:
cls_segms = mask_results
num_classes = len(cls_segms)
encoded_mask_results = [[] for _ in range(num_classes)]
for i in range(len(cls_segms)):
for cls_segm in cls_segms[i]:
encoded_mask_results[i].append(
mask_util.encode(
np.array(
cls_segm[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
if isinstance(mask_results, tuple):
return encoded_mask_results, cls_mask_scores
else:
return encoded_mask_results
def mask2bbox(masks):
"""Obtain tight bounding boxes of binary masks.
Args:
masks (Tensor): Binary mask of shape (n, h, w).
Returns:
        Tensor: Bboxes with shape (n, 4) of the \
            positive regions in the binary masks.
"""
N = masks.shape[0]
bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
x_any = torch.any(masks, dim=1)
y_any = torch.any(masks, dim=2)
for i in range(N):
x = torch.where(x_any[i, :])[0]
y = torch.where(y_any[i, :])[0]
if len(x) > 0 and len(y) > 0:
bboxes[i, :] = bboxes.new_tensor(
[x[0], y[0], x[-1] + 1, y[-1] + 1])
return bboxes
|
from typing import Any, Callable, Dict, Type
import orjson
from docarray.utils._internal.pydantic import is_pydantic_v2
if not is_pydantic_v2:
from pydantic.json import ENCODERS_BY_TYPE
else:
ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = {
bytes: lambda o: o.decode(),
frozenset: list,
set: list,
}
def _default_orjson(obj):
"""
    Default handler passed to ``orjson.dumps`` for types it cannot serialize natively.
    :param obj: the object to serialize
    :return: a JSON-compatible representation of ``obj``
"""
from docarray.base_doc import BaseNode
if isinstance(obj, BaseNode):
return obj._docarray_to_json_compatible()
else:
for cls_, encoder in ENCODERS_BY_TYPE.items():
if isinstance(obj, cls_):
return encoder(obj)
return obj
def orjson_dumps(v, *, default=None) -> bytes:
# dumps to bytes using orjson
return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY)
def orjson_dumps_and_decode(v, *, default=None) -> str:
# dumps to str using orjson
return orjson_dumps(v, default=default).decode()
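# A minimal sketch of the two dump helpers; the set goes through the
# ENCODERS_BY_TYPE fallback and the numpy array through OPT_SERIALIZE_NUMPY
# (assumes numpy is installed alongside orjson and docarray).
if __name__ == "__main__":
    import numpy as np
    payload = {"ids": {1, 2, 3}, "vector": np.arange(3)}
    print(orjson_dumps(payload))  # b'{"ids":[1,2,3],"vector":[0,1,2]}'
    print(orjson_dumps_and_decode(payload))  # '{"ids":[1,2,3],"vector":[0,1,2]}'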
|
import orjson
from pydantic.json import ENCODERS_BY_TYPE
def _default_orjson(obj):
"""
    Default handler passed to ``orjson.dumps`` for types it cannot serialize natively.
    :param obj: the object to serialize
    :return: a JSON-compatible representation of ``obj``
"""
from docarray.base_doc import BaseNode
if isinstance(obj, BaseNode):
return obj._docarray_to_json_compatible()
else:
for cls_, encoder in ENCODERS_BY_TYPE.items():
if isinstance(obj, cls_):
return encoder(obj)
return obj
def orjson_dumps(v, *, default=None) -> bytes:
# dumps to bytes using orjson
return orjson.dumps(v, default=_default_orjson, option=orjson.OPT_SERIALIZE_NUMPY)
def orjson_dumps_and_decode(v, *, default=None) -> str:
    # dumps to str using orjson
return orjson_dumps(v, default=default).decode()
|
import hashlib
import io
import os
import urllib
import warnings
from typing import List, Optional, Union
import torch
from tqdm import tqdm
from .audio import load_audio, log_mel_spectrogram, pad_or_trim
from .decoding import DecodingOptions, DecodingResult, decode, detect_language
from .model import Whisper, ModelDimensions
from .transcribe import transcribe
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
}
def _download(url: str, root: str, in_memory: bool) -> Union[bytes, str]:
os.makedirs(root, exist_ok=True)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, os.path.basename(url))
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
model_bytes = open(download_target, "rb").read()
if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
return model_bytes if in_memory else download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
model_bytes = open(download_target, "rb").read()
if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
raise RuntimeError("Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.")
return model_bytes if in_memory else download_target
def available_models() -> List[str]:
"""Returns the names of available models"""
return list(_MODELS.keys())
def load_model(name: str, device: Optional[Union[str, torch.device]] = None, download_root: str = None, in_memory: bool = False) -> Whisper:
"""
Load a Whisper ASR model
Parameters
----------
name : str
one of the official model names listed by `whisper.available_models()`, or
path to a model checkpoint containing the model dimensions and the model state_dict.
device : Union[str, torch.device]
the PyTorch device to put the model into
download_root: str
path to download the model files; by default, it uses "~/.cache/whisper"
in_memory: bool
whether to preload the model weights into host memory
Returns
-------
model : Whisper
The Whisper ASR model instance
"""
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if download_root is None:
download_root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
if name in _MODELS:
checkpoint_file = _download(_MODELS[name], download_root, in_memory)
elif os.path.isfile(name):
checkpoint_file = open(name, "rb").read() if in_memory else name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
with (io.BytesIO(checkpoint_file) if in_memory else open(checkpoint_file, "rb")) as fp:
checkpoint = torch.load(fp, map_location=device)
del checkpoint_file
dims = ModelDimensions(**checkpoint["dims"])
model = Whisper(dims)
model.load_state_dict(checkpoint["model_state_dict"])
return model.to(device)
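# A minimal sketch of loading a model and transcribing a file; "tiny" is the
# smallest official checkpoint, "audio.wav" is a placeholder path, and the
# first call downloads the checkpoint over the network.
if __name__ == "__main__":
    model = load_model("tiny", device="cpu")
    result = transcribe(model, "audio.wav")
    print(result["text"])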
|
import hashlib
import io
import os
import urllib
import warnings
from typing import List, Optional, Union
import torch
from tqdm import tqdm
from .audio import load_audio, log_mel_spectrogram, pad_or_trim
from .decoding import DecodingOptions, DecodingResult, decode, detect_language
from .model import Whisper, ModelDimensions
from .transcribe import transcribe
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
}
def _download(url: str, root: str) -> bytes:
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
model_bytes = open(download_target, "rb").read()
if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
model_bytes = open(download_target, "rb").read()
if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
raise RuntimeError("Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.")
return model_bytes
def available_models() -> List[str]:
"""Returns the names of available models"""
return list(_MODELS.keys())
def load_model(name: str, device: Optional[Union[str, torch.device]] = None, download_root: str = None) -> Whisper:
"""
Load a Whisper ASR model
Parameters
----------
name : str
one of the official model names listed by `whisper.available_models()`, or
path to a model checkpoint containing the model dimensions and the model state_dict.
device : Union[str, torch.device]
the PyTorch device to put the model into
download_root: str
path to download the model files; by default, it uses "~/.cache/whisper"
Returns
-------
model : Whisper
The Whisper ASR model instance
"""
if name in _MODELS:
model_bytes = _download(_MODELS[name], download_root or os.path.expanduser("~/.cache/whisper"))
elif os.path.isfile(name):
model_bytes = open(name, "rb").read()
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
with io.BytesIO(model_bytes) as fp:
checkpoint = torch.load(fp, map_location="cpu")
dims = ModelDimensions(**checkpoint["dims"])
state_dict = checkpoint["model_state_dict"]
model = Whisper(dims)
model.load_state_dict(state_dict)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
return model.to(device)
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:'"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class ReActSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
    it should be in the format below. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?
```
If the output signals that a final answer should be given,
    it should be in the format below. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
if includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()},
text,
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
if not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)",
text,
re.DOTALL,
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
@property
def _type(self) -> str:
return "react-single-input"
|
import re
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:'"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class ReActSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
    it should be in the format below. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?
```
If the output signals that a final answer should be given,
    it should be in the format below. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
msg = f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
raise OutputParserException(msg)
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
if includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
if not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(
msg,
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
@property
def _type(self) -> str:
return "react-single-input"
|
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.""" # noqa: E501
FORMAT_INSTRUCTIONS = """RESPONSE FORMAT INSTRUCTIONS
----------------------------
When responding to me, please output a response in one of two formats:
**Option 1:**
Use this if you want the human to use a tool.
Markdown code snippet formatted in the following schema:
```json
{{{{
"action": string, \\\\ The action to take. Must be one of {tool_names}
"action_input": string \\\\ The input to the action
}}}}
```
**Option #2:**
Use this if you want to respond directly to the human. Markdown code snippet formatted in the following schema:
```json
{{{{
"action": "Final Answer",
"action_input": string \\\\ You should put what you want to return to use here
}}}}
```""" # noqa: E501
SUFFIX = """TOOLS
------
Assistant can ask the user to use tools to look up information that may be helpful in answering the user's original question. The tools the human can use are:
{{tools}}
{format_instructions}
USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
{{{{input}}}}""" # noqa: E501
TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE:
---------------------
{observation}
USER'S INPUT
--------------------
Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else.""" # noqa: E501
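# A minimal sketch of how the double-braced placeholders resolve; a single
# `.format(...)` pass fills `{tool_names}` / `{format_instructions}` and leaves
# the remaining braces for a later templating step. The tool names are illustrative.
if __name__ == "__main__":
    instructions = FORMAT_INSTRUCTIONS.format(tool_names='"search", "calculator"')
    prompt_suffix = SUFFIX.format(format_instructions=instructions)
    print(prompt_suffix.splitlines()[0])  # TOOLS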
|
# flake8: noqa
PREFIX = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist."""
FORMAT_INSTRUCTIONS = """RESPONSE FORMAT INSTRUCTIONS
----------------------------
When responding to me, please output a response in one of two formats:
**Option 1:**
Use this if you want the human to use a tool.
Markdown code snippet formatted in the following schema:
```json
{{{{
"action": string, \\\\ The action to take. Must be one of {tool_names}
"action_input": string \\\\ The input to the action
}}}}
```
**Option #2:**
Use this if you want to respond directly to the human. Markdown code snippet formatted in the following schema:
```json
{{{{
"action": "Final Answer",
"action_input": string \\\\ You should put what you want to return to use here
}}}}
```"""
SUFFIX = """TOOLS
------
Assistant can ask the user to use tools to look up information that may be helpful in answering the user's original question. The tools the human can use are:
{{tools}}
{format_instructions}
USER'S INPUT
--------------------
Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
{{{{input}}}}"""
TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE:
---------------------
{observation}
USER'S INPUT
--------------------
Okay, so what is the response to my last comment? If using information obtained from the tools you must mention it explicitly without mentioning the tool names - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else."""
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.servers import BaseServer
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
from tests.helper import _generate_pod_args
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = [port]
args.name = name
if executor:
args.uses = executor
with AsyncNewLoopRuntime(args, req_handler_cls=WorkerRequestHandler) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
with AsyncNewLoopRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
'--protocol',
protocol,
]
), req_handler_cls=GatewayRequestHandler
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['http'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from tests.helper import _generate_pod_args
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
'--protocol',
protocol,
]
)
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['http'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
_base_ = './cornernet_hourglass104_8xb6-210e-mstest_coco.py'
train_dataloader = dict(batch_size=3)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (3 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
_base_ = './cornernet_hourglass104_mstest_8x6_210e_coco.py'
train_dataloader = dict(batch_size=3)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (3 samples per GPU)
auto_scale_lr = dict(base_batch_size=96)
|
import json
import re
from datetime import datetime
from typing import List
import requests
from tenacity import retry, stop_after_attempt, wait_random_exponential
def correct_date(yr, dt):
"""
    Some transcripts have an incorrect date; this corrects it.
    Args:
        yr (int): actual year
        dt (str): given date as a "%Y-%m-%d %H:%M:%S" string
    Returns:
        str: corrected date string
"""
dt = datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
if dt.year != yr:
dt = dt.replace(year=yr)
return dt.strftime("%Y-%m-%d %H:%M:%S")
def extract_speakers(cont: str) -> List[str]:
"""
Extract the list of speakers.
Args:
cont (str): transcript content
Returns:
List[str]: list of speakers
"""
pattern = re.compile(r"\n(.*?):")
matches = pattern.findall(cont)
return list(set(matches))
@retry(wait=wait_random_exponential(min=1, max=5), stop=stop_after_attempt(2))
def get_earnings_transcript(quarter: str, ticker: str, year: int):
"""
Get the earnings transcripts.
Args:
        quarter (str): fiscal quarter of the call
        ticker (str): company ticker symbol
        year (int): fiscal year of the call
"""
response = requests.get(
f"https://discountingcashflows.com/api/transcript/{ticker}/{quarter}/{year}/",
auth=("user", "pass"),
)
resp_text = json.loads(response.text)
speakers_list = extract_speakers(resp_text[0]["content"])
corrected_date = correct_date(resp_text[0]["year"], resp_text[0]["date"])
resp_text[0]["date"] = corrected_date
return resp_text[0], speakers_list
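# A small usage sketch for the helpers above. The quarter/ticker values below are
# made up for illustration, and the call hits the real discountingcashflows.com
# endpoint, so treat this as a shape/typing example rather than a reproducible test.
if __name__ == "__main__":
    transcript, speakers = get_earnings_transcript("Q4", "AAPL", 2022)
    print(transcript["date"])    # corrected "%Y-%m-%d %H:%M:%S" string
    print(sorted(speakers)[:3])  # a few of the speaker names found in the content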
|
import json
import re
from datetime import datetime
from typing import List
import requests
from tenacity import retry, stop_after_attempt, wait_random_exponential
def correct_date(yr, dt):
"""Some transcripts have incorrect date, correcting it.
Args:
yr (int): actual
dt (datetime): given date
Returns:
datetime: corrected date
"""
dt = datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
if dt.year != yr:
dt = dt.replace(year=yr)
return dt.strftime("%Y-%m-%d %H:%M:%S")
def extract_speakers(cont: str) -> List[str]:
"""Extract the list of speakers.
Args:
cont (str): transcript content
Returns:
List[str]: list of speakers
"""
pattern = re.compile(r"\n(.*?):")
matches = pattern.findall(cont)
return list(set(matches))
@retry(wait=wait_random_exponential(min=1, max=5), stop=stop_after_attempt(2))
def get_earnings_transcript(quarter: str, ticker: str, year: int):
"""Get the earnings transcripts.
Args:
        quarter (str): fiscal quarter of the call
        ticker (str): company ticker symbol
        year (int): fiscal year of the call
"""
response = requests.get(
f"https://discountingcashflows.com/api/transcript/{ticker}/{quarter}/{year}/",
auth=("user", "pass"),
)
resp_text = json.loads(response.text)
speakers_list = extract_speakers(resp_text[0]["content"])
corrected_date = correct_date(resp_text[0]["year"], resp_text[0]["date"])
resp_text[0]["date"] = corrected_date
return resp_text[0], speakers_list
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine.registry import MODEL_WRAPPERS
def mock(*args, **kwargs):
pass
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_model_wrapper():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
# _verify_model_across_ranks is added in torch1.9.0 so we should check
# whether _verify_model_across_ranks is the member of torch.distributed
# before mocking
if hasattr(torch.distributed, '_verify_model_across_ranks'):
torch.distributed._verify_model_across_ranks = mock
# _verify_model_across_ranks is added in torch1.11.0 so we should check
# whether _verify_params_across_processes is the member of
# torch.distributed before mocking
if hasattr(torch.distributed, '_verify_params_across_processes'):
torch.distributed._verify_params_across_processes = mock
model = Model()
assert not is_model_wrapper(model)
mmdp = MMDataParallel(model)
assert is_model_wrapper(mmdp)
mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(mmddp)
torch_dp = DataParallel(model)
assert is_model_wrapper(torch_dp)
torch_ddp = DistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(torch_ddp)
# test model wrapper registry
@MODEL_WRAPPERS.register_module()
class ModelWrapper:
def __init__(self, module):
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
model_wrapper = ModelWrapper(model)
assert is_model_wrapper(model_wrapper)
class TestMMDataParallel(TestCase):
def setUp(self):
"""Setup the demo image in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
def train_step(self, x):
return self.forward(x)
def val_step(self, x):
return self.forward(x)
self.model = Model()
def test_train_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without train_step attribute
with pytest.raises(AssertionError):
mmdp.train_step(torch.zeros([1, 1, 3, 3]))
out = self.model.train_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
def test_val_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without val_step attribute
with pytest.raises(AssertionError):
mmdp.val_step(torch.zeros([1, 1, 3, 3]))
out = self.model.val_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from mmengine.model.wrappers import (MMDataParallel, MMDistributedDataParallel,
is_model_wrapper)
from mmengine.registry import MODEL_WRAPPERS
def mock(*args, **kwargs):
pass
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_model_wrapper():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
# _verify_model_across_ranks is added in torch1.9.0 so we should check
# whether _verify_model_across_ranks is the member of torch.distributed
# before mocking
if hasattr(torch.distributed, '_verify_model_across_ranks'):
torch.distributed._verify_model_across_ranks = mock
model = Model()
assert not is_model_wrapper(model)
mmdp = MMDataParallel(model)
assert is_model_wrapper(mmdp)
mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(mmddp)
torch_dp = DataParallel(model)
assert is_model_wrapper(torch_dp)
torch_ddp = DistributedDataParallel(model, process_group=MagicMock())
assert is_model_wrapper(torch_ddp)
# test model wrapper registry
@MODEL_WRAPPERS.register_module()
class ModelWrapper:
def __init__(self, module):
self.module = module
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
model_wrapper = ModelWrapper(model)
assert is_model_wrapper(model_wrapper)
class TestMMDataParallel(TestCase):
def setUp(self):
"""Setup the demo image in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
def train_step(self, x):
return self.forward(x)
def val_step(self, x):
return self.forward(x)
self.model = Model()
def test_train_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without train_step attribute
with pytest.raises(AssertionError):
mmdp.train_step(torch.zeros([1, 1, 3, 3]))
out = self.model.train_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
def test_val_step(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 2, 1)
def forward(self, x):
return self.conv(x)
model = Model()
mmdp = MMDataParallel(model)
# test without val_step attribute
with pytest.raises(AssertionError):
mmdp.val_step(torch.zeros([1, 1, 3, 3]))
out = self.model.val_step(torch.zeros([1, 1, 3, 3]))
assert out.shape == (1, 2, 3, 3)
|
import wave
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16, AudioNdArray
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
AUDIO_FILE_FORMATS = ['wav']
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to a .wav file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_audio_extension = any(ext in url for ext in AUDIO_FILE_FORMATS)
if not has_audio_extension:
raise ValueError(
f'Audio URL must have one of the following extensions:'
f'{AUDIO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, dtype: str = 'float32') -> AudioNdArray:
"""
Load the data from the url into an AudioNdArray.
:param dtype: Data-type of the returned array; default: float32.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
            from docarray.typing import AudioNdArray, AudioUrl
            class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
import io
file: Union[io.BytesIO, T]
if self.startswith('http'):
import requests
resp = requests.get(self)
resp.raise_for_status()
file = io.BytesIO()
file.write(resp.content)
file.seek(0)
else:
file = self
# note wave is Python built-in mod. https://docs.python.org/3/library/wave.html
with wave.open(file) as ifile:
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(dtype=dtype)
# Normalise float32 array so that values are between -1.0 and +1.0
audio_norm = audio_as_np_float32 / MAX_INT_16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty((int(len(audio_norm) / channels), channels))
audio_stereo[:, 0] = audio_norm[range(0, len(audio_norm), 2)]
audio_stereo[:, 1] = audio_norm[range(1, len(audio_norm), 2)]
return parse_obj_as(AudioNdArray, audio_stereo)
else:
return parse_obj_as(AudioNdArray, audio_norm)
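# A stand-alone sketch of the normalisation performed in `load()` above, using only
# numpy (already imported as np at the top of this module). The exact value of
# MAX_INT_16 is assumed here to be 2**15; the module imports it rather than defining it.
if __name__ == "__main__":
    raw = np.array([0, 16384, -16384, 32767], dtype=np.int16)  # fake interleaved PCM
    norm = raw.astype('float32') / (2**15)                     # roughly within [-1.0, 1.0]
    stereo = np.stack([norm[0::2], norm[1::2]], axis=1)        # de-interleave into L/R
    assert stereo.shape == (2, 2)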
|
import wave
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16, AudioNdArray
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioUrl')
AUDIO_FILE_FORMATS = ['wav']
class AudioUrl(AnyUrl):
"""
URL to a .wav file.
Can be remote (web) URL, or a local file path.
"""
def _to_node_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(audio_url=str(self))
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_audio_extension = any(ext in url for ext in AUDIO_FILE_FORMATS)
if not has_audio_extension:
raise ValueError(
f'Audio URL must have one of the following extensions:'
f'{AUDIO_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(self: T, dtype: str = 'float32') -> AudioNdArray:
"""
Load the data from the url into an AudioNdArray.
:param dtype: Data-type of the returned array; default: float32.
:return: AudioNdArray representing the audio file content.
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
import numpy as np
            from docarray.typing import AudioNdArray, AudioUrl
            class MyDoc(BaseDocument):
audio_url: AudioUrl
audio_tensor: AudioNdArray
doc = MyDoc(audio_url="toydata/hello.wav")
doc.audio_tensor = doc.audio_url.load()
assert isinstance(doc.audio_tensor, np.ndarray)
"""
import io
file: Union[io.BytesIO, T]
if self.startswith('http'):
import requests
resp = requests.get(self)
resp.raise_for_status()
file = io.BytesIO()
file.write(resp.content)
file.seek(0)
else:
file = self
# note wave is Python built-in mod. https://docs.python.org/3/library/wave.html
with wave.open(file) as ifile:
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(dtype=dtype)
# Normalise float32 array so that values are between -1.0 and +1.0
audio_norm = audio_as_np_float32 / MAX_INT_16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty((int(len(audio_norm) / channels), channels))
audio_stereo[:, 0] = audio_norm[range(0, len(audio_norm), 2)]
audio_stereo[:, 1] = audio_norm[range(1, len(audio_norm), 2)]
return parse_obj_as(AudioNdArray, audio_stereo)
else:
return parse_obj_as(AudioNdArray, audio_norm)
|
import pytest
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_to_bytes(protocol, compress, show_progress, array_cls):
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
@pytest.mark.parametrize('array_cls', [DocList, DocVec])
def test_from_to_base64(protocol, compress, show_progress, array_cls):
da = array_cls[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = array_cls[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
def test_union_type_error(tmp_path):
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
docs.from_bytes(docs.to_bytes())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_bytes(docs_basic.to_bytes())
assert docs_copy == docs_basic
|
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_bytes(protocol, compress, show_progress):
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_base64(protocol, compress, show_progress):
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
def test_union_type_error(tmp_path):
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
docs.from_bytes(docs.to_bytes())
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_bytes(docs_basic.to_bytes())
assert docs_copy == docs_basic
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import pytest
import requests
import weaviate
HOST = "http://localhost:8080"
cur_dir = os.path.dirname(os.path.abspath(__file__))
weaviate_yml = os.path.abspath(os.path.join(cur_dir, "docker-compose.yml"))
@pytest.fixture(scope="session", autouse=True)
def start_storage():
os.system(f"docker compose -f {weaviate_yml} up -d --remove-orphans")
_wait_for_weaviate()
yield
os.system(f"docker compose -f {weaviate_yml} down --remove-orphans")
def _wait_for_weaviate():
while True:
try:
response = requests.get(f"{HOST}/v1/.well-known/ready")
if response.status_code == 200:
return
else:
time.sleep(0.5)
except requests.exceptions.ConnectionError:
time.sleep(1)
@pytest.fixture
def weaviate_client(start_storage):
client = weaviate.Client(HOST)
client.schema.delete_all()
yield client
client.schema.delete_all()
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import pytest
import requests
import weaviate
HOST = "http://localhost:8080"
cur_dir = os.path.dirname(os.path.abspath(__file__))
weaviate_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker-compose -f {weaviate_yml} up -d --remove-orphans")
_wait_for_weaviate()
yield
os.system(f"docker-compose -f {weaviate_yml} down --remove-orphans")
def _wait_for_weaviate():
while True:
try:
response = requests.get(f"{HOST}/v1/.well-known/ready")
if response.status_code == 200:
return
else:
time.sleep(0.5)
except requests.exceptions.ConnectionError:
time.sleep(1)
@pytest.fixture
def weaviate_client(start_storage):
client = weaviate.Client(HOST)
client.schema.delete_all()
yield client
client.schema.delete_all()
|
from typing import Any, Callable, Optional
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 images
        image_size(tuple, optional): Size of the returned images. Default: (3, 224, 224)
num_classes(int, optional): Number of classes in the dataset. Default: 10
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
random_offset (int): Offsets the index-based random seed used to
generate each image. Default: 0
"""
def __init__(
self,
size: int = 1000,
image_size: tuple[int, int, int] = (3, 224, 224),
num_classes: int = 10,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
random_offset: int = 0,
) -> None:
super().__init__(transform=transform, target_transform=target_transform)
self.size = size
self.num_classes = num_classes
self.image_size = image_size
self.random_offset = random_offset
def __getitem__(self, index: int) -> tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# create random image that is consistent with the index id
if index >= len(self):
raise IndexError(f"{self.__class__.__name__} index out of range")
rng_state = torch.get_rng_state()
torch.manual_seed(index + self.random_offset)
img = torch.randn(*self.image_size)
target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
torch.set_rng_state(rng_state)
# convert to PIL Image
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target.item()
def __len__(self) -> int:
return self.size
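# A short usage sketch for the FakeData class above. Sizes are arbitrary; the point
# is that indexing yields a (PIL image, int label) pair and that each index is
# generated from its own deterministic seed (index + random_offset).
if __name__ == "__main__":
    ds = FakeData(size=4, image_size=(3, 32, 32), num_classes=5)
    img, target = ds[0]
    assert len(ds) == 4
    assert 0 <= target < 5
    assert img.size == (32, 32)  # PIL reports (width, height)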
|
from typing import Any, Callable, Optional, Tuple
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 images
        image_size(tuple, optional): Size of the returned images. Default: (3, 224, 224)
num_classes(int, optional): Number of classes in the dataset. Default: 10
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
random_offset (int): Offsets the index-based random seed used to
generate each image. Default: 0
"""
def __init__(
self,
size: int = 1000,
image_size: Tuple[int, int, int] = (3, 224, 224),
num_classes: int = 10,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
random_offset: int = 0,
) -> None:
super().__init__(transform=transform, target_transform=target_transform)
self.size = size
self.num_classes = num_classes
self.image_size = image_size
self.random_offset = random_offset
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# create random image that is consistent with the index id
if index >= len(self):
raise IndexError(f"{self.__class__.__name__} index out of range")
rng_state = torch.get_rng_state()
torch.manual_seed(index + self.random_offset)
img = torch.randn(*self.image_size)
target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
torch.set_rng_state(rng_state)
# convert to PIL Image
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target.item()
def __len__(self) -> int:
return self.size
|
from __future__ import annotations
from contextlib import nullcontext
import pytest
import torch
import tqdm
from torch.optim import Adam
from transformers import set_seed
from sentence_transformers import InputExample, SentenceTransformer, losses
@pytest.mark.parametrize(
["train_samples_mnrl", "train_samples_cmnrl", "same_grad", "scaler", "precision"],
[
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
True,
1.0,
1e-6,
),
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["adsa", "czx", "dsada"],
["b", "fas", "xcz"],
["c", "yyy", "asdas"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
False,
1.0,
1e-6,
),
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
True,
1000.0,
1e-3,
),
],
)
def test_cmnrl_same_grad(
train_samples_mnrl: list[InputExample],
train_samples_cmnrl: list[InputExample],
same_grad: bool,
scaler: float,
precision: float,
):
# Given:
sbert = SentenceTransformer("distilbert-base-uncased")
sbert.to("cpu")
optimizer = Adam(sbert.parameters())
# train_samples_mnrl
# train_samples_cmnrl
# same_grad
# scaler # This simulates AMP scenarios
# precision
# When:
# First run with MNRL
set_seed(42)
optimizer.zero_grad()
loss_mnrl = losses.MultipleNegativesRankingLoss(sbert)
loss_mnrl_value: torch.Tensor = loss_mnrl.forward(*sbert.smart_batching_collate(train_samples_mnrl)) * scaler
loss_mnrl_value.backward()
grad_expected = {name: p.grad.clone() for name, p in loss_mnrl.named_parameters() if p.grad is not None}
# Then run with this cached version:
set_seed(42)
optimizer.zero_grad()
loss_cmnrl = losses.CachedMultipleNegativesRankingLoss(sbert, mini_batch_size=2)
loss_cmnrl_value = loss_cmnrl.forward(*sbert.smart_batching_collate(train_samples_cmnrl)) * scaler
loss_cmnrl_value.backward()
grad = {name: p.grad.clone() for name, p in loss_cmnrl.named_parameters() if p.grad is not None}
# Then:
if same_grad:
assert pytest.approx(loss_mnrl_value.item()) == loss_cmnrl_value.item()
else:
assert pytest.approx(loss_mnrl_value.item()) != loss_cmnrl_value.item()
nclose = 0
for name in tqdm.tqdm(grad_expected):
nclose += torch.allclose(grad[name], grad_expected[name], precision, precision)
if same_grad:
assert nclose == len(grad_expected)
else:
assert nclose != len(grad_expected)
@pytest.mark.parametrize("use_rand_context", [True, False])
def test_rand_context_working(use_rand_context: bool):
# Given:
from sentence_transformers.losses.CachedMultipleNegativesRankingLoss import (
RandContext,
)
a = torch.Tensor(1)
b = torch.Tensor(1)
random_state = RandContext(a, b) if use_rand_context else nullcontext()
expected = torch.rand(1000)
precision = 1e-6
# When:
with random_state:
# Then:
if use_rand_context:
assert torch.allclose(torch.rand(1000), expected, precision, precision)
else:
assert not torch.allclose(torch.rand(1000), expected, precision, precision)
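# The RandContext test above depends on saving and replaying RNG state. A minimal
# illustration of that mechanism with plain torch (this is the underlying idea only,
# not RandContext's actual implementation):
if __name__ == "__main__":
    state = torch.get_rng_state()
    first = torch.rand(5)
    torch.set_rng_state(state)  # restore the saved state ...
    second = torch.rand(5)      # ... so the exact same numbers are drawn again
    assert torch.equal(first, second)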
|
from contextlib import nullcontext
from typing import List
import pytest
import torch
import tqdm
from torch.optim import Adam
from transformers import set_seed
from sentence_transformers import InputExample, SentenceTransformer, losses
@pytest.mark.parametrize(
["train_samples_mnrl", "train_samples_cmnrl", "same_grad", "scaler", "precision"],
[
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
True,
1.0,
1e-6,
),
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["adsa", "czx", "dsada"],
["b", "fas", "xcz"],
["c", "yyy", "asdas"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
False,
1.0,
1e-6,
),
(
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
[
InputExample(texts=[q, p, n])
for q, p, n in zip(
["aaa", "bbb", "ccc", "ddd", "eee"],
["aas", "bbs", "ccs", "dds", "ees"],
["xxx", "yyy", "zzz", "kkk", "fff"],
)
],
True,
1000.0,
1e-3,
),
],
)
def test_cmnrl_same_grad(
train_samples_mnrl: List[InputExample],
train_samples_cmnrl: List[InputExample],
same_grad: bool,
scaler: float,
precision: float,
):
# Given:
sbert = SentenceTransformer("distilbert-base-uncased")
sbert.to("cpu")
optimizer = Adam(sbert.parameters())
# train_samples_mnrl
# train_samples_cmnrl
# same_grad
# scaler # This simulates AMP scenarios
# precision
# When:
# First run with MNRL
set_seed(42)
optimizer.zero_grad()
loss_mnrl = losses.MultipleNegativesRankingLoss(sbert)
loss_mnrl_value: torch.Tensor = loss_mnrl.forward(*sbert.smart_batching_collate(train_samples_mnrl)) * scaler
loss_mnrl_value.backward()
grad_expected = {name: p.grad.clone() for name, p in loss_mnrl.named_parameters() if p.grad is not None}
# Then run with this cached version:
set_seed(42)
optimizer.zero_grad()
loss_cmnrl = losses.CachedMultipleNegativesRankingLoss(sbert, mini_batch_size=2)
loss_cmnrl_value = loss_cmnrl.forward(*sbert.smart_batching_collate(train_samples_cmnrl)) * scaler
loss_cmnrl_value.backward()
grad = {name: p.grad.clone() for name, p in loss_cmnrl.named_parameters() if p.grad is not None}
# Then:
if same_grad:
assert pytest.approx(loss_mnrl_value.item()) == loss_cmnrl_value.item()
else:
assert pytest.approx(loss_mnrl_value.item()) != loss_cmnrl_value.item()
nclose = 0
for name in tqdm.tqdm(grad_expected):
nclose += torch.allclose(grad[name], grad_expected[name], precision, precision)
if same_grad:
assert nclose == len(grad_expected)
else:
assert nclose != len(grad_expected)
@pytest.mark.parametrize("use_rand_context", [True, False])
def test_rand_context_working(use_rand_context: bool):
# Given:
from sentence_transformers.losses.CachedMultipleNegativesRankingLoss import (
RandContext,
)
a = torch.Tensor(1)
b = torch.Tensor(1)
random_state = RandContext(a, b) if use_rand_context else nullcontext()
expected = torch.rand(1000)
precision = 1e-6
# When:
with random_state:
# Then:
if use_rand_context:
assert torch.allclose(torch.rand(1000), expected, precision, precision)
else:
assert not torch.allclose(torch.rand(1000), expected, precision, precision)
|
"""Init file."""
from llama_index.tools.zapier.base import (
ACTION_URL_TMPL,
ZapierToolSpec,
)
__all__ = ["ACTION_URL_TMPL", "ZapierToolSpec"]
|
"""Init file."""
from llama_index.tools.zapier.base import (
ACTION_URL_TMPL,
ZapierToolSpec,
)
__all__ = ["ACTION_URL_TMPL", "ZapierToolSpec"]
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain:
- a [`VideoUrl`][docarray.typing.url.VideoUrl] (`VideoDoc.url`)
- an [`AudioDoc`][docarray.documents.AudioDoc] (`VideoDoc.audio`)
- a [`VideoTensor`](../../../api_references/typing/tensor/video) (`VideoDoc.tensor`)
- an [`AnyTensor`](../../../api_references/typing/tensor/tensor) representing the indices of the video's key frames (`VideoDoc.key_frame_indices`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`VideoDoc.embedding`)
- a [`VideoBytes`][docarray.typing.bytes.VideoBytes] object (`VideoDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
# model = MyEmbeddingModel()
# vid.embedding = model(vid.tensor)
```
You can extend this Document:
```python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video.name = TextDoc(text='my first video')
video.tensor = video.url.load().video
# model = MyEmbeddingModel()
# video.embedding = model(video.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
mmdoc.video.tensor = mmdoc.video.bytes_.load().video
```
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[VideoBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain:
- a [`VideoUrl`][docarray.typing.url.VideoUrl] (`VideoDoc.url`)
- an [`AudioDoc`][docarray.documents.AudioDoc] (`VideoDoc.audio`)
- a [`VideoTensor`](../../../api_references/typing/tensor/video) (`VideoDoc.tensor`)
- an [`AnyTensor`](../../../api_references/typing/tensor/tensor) representing the indices of the video's key frames (`VideoDoc.key_frame_indices`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`VideoDoc.embedding`)
- a [`VideoBytes`][docarray.typing.bytes.VideoBytes] object (`VideoDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
# model = MyEmbeddingModel()
# vid.embedding = model(vid.tensor)
```
You can extend this Document:
```python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.name = TextDoc(text='my first video')
video.tensor = video.url.load().video
# model = MyEmbeddingModel()
# video.embedding = model(video.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
mmdoc.video.tensor = mmdoc.video.bytes_.load().video
```
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[VideoBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import SERIALIZERS, pickle_and_unpickle_object
def reset_feature_fraction(boosting_round):
return 0.6 if boosting_round < 15 else 0.8
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 30
assert callback_from_disk.before_iteration is False
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
def test_early_stopping_callback_rejects_invalid_stopping_rounds_with_informative_errors():
with pytest.raises(ValueError, match="stopping_rounds should be an integer and greater than 0. got: 0"):
lgb.early_stopping(stopping_rounds=0)
with pytest.raises(ValueError, match="stopping_rounds should be an integer and greater than 0. got: -1"):
lgb.early_stopping(stopping_rounds=-1)
with pytest.raises(ValueError, match="stopping_rounds should be an integer and greater than 0. got: neverrrr"):
lgb.early_stopping(stopping_rounds="neverrrr")
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_log_evaluation_callback_is_picklable(serializer):
periods = 42
callback = lgb.log_evaluation(period=periods)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is False
assert callback.period == callback_from_disk.period
assert callback.period == periods
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_record_evaluation_callback_is_picklable(serializer):
results = {}
callback = lgb.record_evaluation(eval_result=results)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 20
assert callback_from_disk.before_iteration is False
assert callback.eval_result == callback_from_disk.eval_result
assert callback.eval_result is results
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_reset_parameter_callback_is_picklable(serializer):
params = {
'bagging_fraction': [0.7] * 5 + [0.6] * 5,
'feature_fraction': reset_feature_fraction
}
callback = lgb.reset_parameter(**params)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is True
assert callback.kwargs == callback_from_disk.kwargs
assert callback.kwargs == params
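# The pickle_and_unpickle_object helper comes from tests.utils; a stand-alone
# round-trip with the plain pickle module exercises the same property (callback
# attributes survive serialisation). This is only a sketch, not a replacement
# for the parametrized tests above.
if __name__ == "__main__":
    import pickle
    cb = lgb.early_stopping(stopping_rounds=5)
    assert pickle.loads(pickle.dumps(cb)).stopping_rounds == 5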
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import SERIALIZERS, pickle_and_unpickle_object
def reset_feature_fraction(boosting_round):
return 0.6 if boosting_round < 15 else 0.8
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 30
assert callback_from_disk.before_iteration is False
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_log_evaluation_callback_is_picklable(serializer):
periods = 42
callback = lgb.log_evaluation(period=periods)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is False
assert callback.period == callback_from_disk.period
assert callback.period == periods
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_record_evaluation_callback_is_picklable(serializer):
results = {}
callback = lgb.record_evaluation(eval_result=results)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 20
assert callback_from_disk.before_iteration is False
assert callback.eval_result == callback_from_disk.eval_result
assert callback.eval_result is results
@pytest.mark.parametrize('serializer', SERIALIZERS)
def test_reset_parameter_callback_is_picklable(serializer):
params = {
'bagging_fraction': [0.7] * 5 + [0.6] * 5,
'feature_fraction': reset_feature_fraction
}
callback = lgb.reset_parameter(**params)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is True
assert callback.kwargs == callback_from_disk.kwargs
assert callback.kwargs == params
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples)
assert isinstance(tensors.points, np.ndarray)
assert isinstance(tensors.points, NdArray)
assert tensors.points.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(tensors.points, np.ndarray)
assert len(tensors.points.shape) == 3
assert tensors.points.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='PointCloud3DUrl'):
parse_obj_as(PointCloud3DUrl, path_to_file)
else:
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples)
assert isinstance(tensors.points, np.ndarray)
assert isinstance(tensors.points, NdArray)
assert tensors.points.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
tensors = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(tensors.points, np.ndarray)
assert len(tensors.points.shape) == 3
assert tensors.points.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='PointCloud3DUrl'):
parse_obj_as(PointCloud3DUrl, path_to_file)
else:
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
""" # noqa: E501
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.tools.base import (
FILTERED_ARGS,
ArgsSchema,
BaseTool,
BaseToolkit,
InjectedToolArg,
InjectedToolCallId,
SchemaAnnotationError,
ToolException,
_get_runnable_config_param,
create_schema_from_function,
)
from langchain_core.tools.convert import (
convert_runnable_to_tool,
tool,
)
from langchain_core.tools.render import (
ToolsRenderer,
render_text_description,
render_text_description_and_args,
)
from langchain_core.tools.retriever import (
RetrieverInput,
create_retriever_tool,
)
from langchain_core.tools.simple import Tool
from langchain_core.tools.structured import StructuredTool
__all__ = (
"ArgsSchema",
"BaseTool",
"BaseToolkit",
"FILTERED_ARGS",
"SchemaAnnotationError",
"ToolException",
"InjectedToolArg",
"InjectedToolCallId",
"_get_runnable_config_param",
"create_schema_from_function",
"convert_runnable_to_tool",
"tool",
"ToolsRenderer",
"render_text_description",
"render_text_description_and_args",
"RetrieverInput",
"create_retriever_tool",
"Tool",
"StructuredTool",
)
_dynamic_imports = {
"FILTERED_ARGS": "base",
"ArgsSchema": "base",
"BaseTool": "base",
"BaseToolkit": "base",
"InjectedToolArg": "base",
"InjectedToolCallId": "base",
"SchemaAnnotationError": "base",
"ToolException": "base",
"_get_runnable_config_param": "base",
"create_schema_from_function": "base",
"convert_runnable_to_tool": "convert",
"tool": "convert",
"ToolsRenderer": "render",
"render_text_description": "render",
"render_text_description_and_args": "render",
"RetrieverInput": "retriever",
"create_retriever_tool": "retriever",
"Tool": "simple",
"StructuredTool": "structured",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. The agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
""" # noqa: E501
from __future__ import annotations
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.tools.base import (
FILTERED_ARGS,
ArgsSchema,
BaseTool,
BaseToolkit,
InjectedToolArg,
InjectedToolCallId,
SchemaAnnotationError,
ToolException,
_get_runnable_config_param,
create_schema_from_function,
)
from langchain_core.tools.convert import (
convert_runnable_to_tool,
tool,
)
from langchain_core.tools.render import (
ToolsRenderer,
render_text_description,
render_text_description_and_args,
)
from langchain_core.tools.retriever import (
RetrieverInput,
create_retriever_tool,
)
from langchain_core.tools.simple import Tool
from langchain_core.tools.structured import StructuredTool
__all__ = [
"ArgsSchema",
"BaseTool",
"BaseToolkit",
"FILTERED_ARGS",
"SchemaAnnotationError",
"ToolException",
"InjectedToolArg",
"InjectedToolCallId",
"_get_runnable_config_param",
"create_schema_from_function",
"convert_runnable_to_tool",
"tool",
"ToolsRenderer",
"render_text_description",
"render_text_description_and_args",
"RetrieverInput",
"create_retriever_tool",
"Tool",
"StructuredTool",
]
_dynamic_imports = {
"FILTERED_ARGS": "base",
"ArgsSchema": "base",
"BaseTool": "base",
"BaseToolkit": "base",
"InjectedToolArg": "base",
"InjectedToolCallId": "base",
"SchemaAnnotationError": "base",
"ToolException": "base",
"_get_runnable_config_param": "base",
"create_schema_from_function": "base",
"convert_runnable_to_tool": "convert",
"tool": "convert",
"ToolsRenderer": "render",
"render_text_description": "render",
"render_text_description_and_args": "render",
"RetrieverInput": "retriever",
"create_retriever_tool": "retriever",
"Tool": "simple",
"StructuredTool": "structured",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
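# Hedged usage sketch (not part of the original module): accessing any name in
# __all__ from consumer code triggers __getattr__ above, which imports the
# submodule lazily and caches the attribute in globals(). For example:
#
#     from langchain_core.tools import tool
#
#     @tool
#     def add(a: int, b: int) -> int:
#         """Add two integers."""
#         return a + b
#
#     add.invoke({"a": 2, "b": 3})  # -> 5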
|
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_ANYSCALE_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_ANYSCALE_API_VERSION = ""
LLAMA_MODELS = {
"meta-llama/Meta-Llama-3-70B-Instruct": 8192,
"meta-llama/Meta-Llama-3-8B-Instruct": 8192,
"meta-llama/Llama-2-7b-chat-hf": 4096,
"meta-llama/Llama-2-13b-chat-hf": 4096,
"meta-llama/Llama-2-70b-chat-hf": 4096,
"codellama/CodeLlama-34b-Instruct-hf": 16384,
"Meta-Llama/Llama-Guard-7b": 4096,
}
MISTRAL_MODELS = {
"mistralai/Mistral-7B-Instruct-v0.1": 16384,
"Open-Orca/Mistral-7B-OpenOrca": 8192,
"mistralai/Mixtral-8x7B-Instruct-v0.1": 32768,
}
ZEPHYR_MODELS = {
"HuggingFaceH4/zephyr-7b-beta": 16384,
}
ALL_AVAILABLE_MODELS = {
**LLAMA_MODELS,
**MISTRAL_MODELS,
**ZEPHYR_MODELS,
}
DISCONTINUED_MODELS: Dict[str, int] = {}
def anyscale_modelname_to_contextsize(modelname: str) -> int:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = anyscale_modelname_to_contextsize(model_name)
"""
# handling finetuned models
# TO BE FILLED
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Anyscale hosted model {modelname} has been discontinued. "
"Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid Anyscale model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def _message_to_anyscale_prompt(message: ChatMessage) -> Dict[str, Any]:
if message.role == MessageRole.USER:
prompt = {"role": "user", "content": message.content}
elif message.role == MessageRole.ASSISTANT:
prompt = {"role": "assistant", "content": message.content}
elif message.role == MessageRole.SYSTEM:
prompt = {"role": "system", "content": message.content}
elif message.role == MessageRole.FUNCTION:
raise ValueError(f"Message role {MessageRole.FUNCTION} is not supported.")
else:
raise ValueError(f"Unknown message role: {message.role}")
return prompt
def messages_to_anyscale_prompt(messages: Sequence[ChatMessage]) -> List[Dict]:
if len(messages) == 0:
raise ValueError("Got empty list of messages.")
return [_message_to_anyscale_prompt(message) for message in messages]
def resolve_anyscale_credentials(
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
) -> Tuple[Optional[str], str, str]:
"""
"Resolve OpenAI credentials.
The order of precedence is:
1. param
2. env
3. openai module
4. default
"""
# resolve from param or env
api_key = get_from_param_or_env("api_key", api_key, "ANYSCALE_API_KEY", "")
api_base = get_from_param_or_env("api_base", api_base, "ANYSCALE_API_BASE", "")
api_version = get_from_param_or_env(
"api_version", api_version, "ANYSCALE_API_VERSION", ""
)
# resolve from openai module or default
final_api_key = api_key or ""
final_api_base = api_base or DEFAULT_ANYSCALE_API_BASE
final_api_version = api_version or DEFAULT_ANYSCALE_API_VERSION
return final_api_key, str(final_api_base), final_api_version
|
from typing import Any, Dict, List, Optional, Sequence, Tuple
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
DEFAULT_ANYSCALE_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_ANYSCALE_API_VERSION = ""
LLAMA_MODELS = {
"meta-llama/Meta-Llama-3-70B-Instruct": 8192,
"meta-llama/Meta-Llama-3-8B-Instruct": 8192,
"meta-llama/Llama-2-7b-chat-hf": 4096,
"meta-llama/Llama-2-13b-chat-hf": 4096,
"meta-llama/Llama-2-70b-chat-hf": 4096,
"codellama/CodeLlama-34b-Instruct-hf": 16384,
"Meta-Llama/Llama-Guard-7b": 4096,
}
MISTRAL_MODELS = {
"mistralai/Mistral-7B-Instruct-v0.1": 16384,
"Open-Orca/Mistral-7B-OpenOrca": 8192,
"mistralai/Mixtral-8x7B-Instruct-v0.1": 32768,
}
ZEPHYR_MODELS = {
"HuggingFaceH4/zephyr-7b-beta": 16384,
}
ALL_AVAILABLE_MODELS = {
**LLAMA_MODELS,
**MISTRAL_MODELS,
**ZEPHYR_MODELS,
}
DISCONTINUED_MODELS: Dict[str, int] = {}
def anyscale_modelname_to_contextsize(modelname: str) -> int:
"""
Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = anyscale_modelname_to_contextsize(model_name)
"""
# handling finetuned models
# TO BE FILLED
if modelname in DISCONTINUED_MODELS:
raise ValueError(
f"Anyscale hosted model {modelname} has been discontinued. "
"Please choose another model."
)
context_size = ALL_AVAILABLE_MODELS.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid Anyscale model name."
"Known models are: " + ", ".join(ALL_AVAILABLE_MODELS.keys())
)
return context_size
def _message_to_anyscale_prompt(message: ChatMessage) -> Dict[str, Any]:
if message.role == MessageRole.USER:
prompt = {"role": "user", "content": message.content}
elif message.role == MessageRole.ASSISTANT:
prompt = {"role": "assistant", "content": message.content}
elif message.role == MessageRole.SYSTEM:
prompt = {"role": "system", "content": message.content}
elif message.role == MessageRole.FUNCTION:
raise ValueError(f"Message role {MessageRole.FUNCTION} is not supported.")
else:
raise ValueError(f"Unknown message role: {message.role}")
return prompt
def messages_to_anyscale_prompt(messages: Sequence[ChatMessage]) -> List[Dict]:
if len(messages) == 0:
raise ValueError("Got empty list of messages.")
return [_message_to_anyscale_prompt(message) for message in messages]
def resolve_anyscale_credentials(
api_key: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
) -> Tuple[Optional[str], str, str]:
"""
"Resolve OpenAI credentials.
The order of precedence is:
1. param
2. env
3. openai module
4. default
"""
# resolve from param or env
api_key = get_from_param_or_env("api_key", api_key, "ANYSCALE_API_KEY", "")
api_base = get_from_param_or_env("api_base", api_base, "ANYSCALE_API_BASE", "")
api_version = get_from_param_or_env(
"api_version", api_version, "ANYSCALE_API_VERSION", ""
)
# resolve from openai module or default
final_api_key = api_key or ""
final_api_base = api_base or DEFAULT_ANYSCALE_API_BASE
final_api_version = api_version or DEFAULT_ANYSCALE_API_VERSION
return final_api_key, str(final_api_base), final_api_version
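# Hedged usage sketch (not part of the original module): convert ChatMessages to
# the Anyscale/OpenAI-style prompt format and look up a model's context window.
if __name__ == "__main__":
    _messages = [
        ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
        ChatMessage(role=MessageRole.USER, content="Summarize ZeRO in one sentence."),
    ]
    print(messages_to_anyscale_prompt(_messages))
    # [{'role': 'system', ...}, {'role': 'user', ...}]
    print(anyscale_modelname_to_contextsize("meta-llama/Meta-Llama-3-8B-Instruct"))
    # 8192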
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, check_install_package,
get_installed_path, is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path',
'check_install_package', 'is_abs', 'is_method_overridden', 'has_method',
'digit_version', 'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer',
'check_time', 'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .package_utils import (call_command, check_install_package,
get_installed_path, is_installed)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .setup_env import set_multi_processing
from .sync_bn import revert_sync_batchnorm
from .timer import Timer, TimerError, check_time
from .torch_ops import torch_meshgrid
from .trace import is_jit_tracing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs', 'is_installed', 'call_command', 'get_installed_path',
    'check_install_package', 'revert_sync_batchnorm', 'collect_env',
'Timer', 'check_time', 'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress', 'torch_meshgrid',
'is_jit_tracing'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.distributed.rpc import is_available
from mmengine.dist import is_main_process
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
try:
from torch.distributed.optim import \
ZeroRedundancyOptimizer as _ZeroRedundancyOptimizer
except ImportError:
_ZeroRedundancyOptimizer = object
from .builder import OPTIMIZERS
@OPTIMIZERS.register_module()
class ZeroRedundancyOptimizer(_ZeroRedundancyOptimizer):
"""A wrapper class of :class:`ZeroRedundancyOptimizer` that gets a
optimizer type as string.
This class wraps an arbitrary :class:`torch.optim.Optimizer` and shards its
states across ranks in the group as described by ZeRO_. The local optimizer
instance in each rank is only responsible for updating approximately
``1 / world_size`` parameters and hence only needs to keep
``1 / world_size`` optimizer states. After parameters are updated locally,
each rank will broadcast its parameters to all other peers to keep all
model replicas in the same state. ``ZeroRedundancyOptimizer`` can be used
in conjunction with :class:`torch.nn.parallel.DistributedDataParallel` to
reduce per-rank peak memory consumption.
``ZeroRedundancyOptimizer`` uses a sorted-greedy algorithm to pack a number
of parameters at each rank. Each parameter belongs to a single rank and is
not divided among ranks. The partition is arbitrary and might not match the
    parameter registration or usage order.
Warnings:
``ZeroRedundancyOptimizer`` requires PyTorch >= 1.8.
Warnings:
``ZeroRedundancyOptimizer`` requires PyTorch >= 1.12 to enable param
groups.
Args:
params (``Iterable``): an ``Iterable`` of :class:`torch.Tensor` s
or :class:`dict` s giving all parameters, which will be sharded
across ranks.
optimizer_type (str): the string of the local optimizer class.
.. _ZeRO: https://arxiv.org/abs/1910.02054
"""
def __init__(self, params, optimizer_type: str, **kwargs):
assert digit_version(TORCH_VERSION) >= digit_version('1.8.0'), (
            '`torch.distributed.optim.ZeroRedundancyOptimizer` is only '
'available when pytorch version >= 1.8.')
assert is_available(), 'torch.distributed.rpc is not available.'
# Avoid the generator becoming empty after the following check
params = list(params)
assert (
all(isinstance(p, torch.Tensor) for p in params)
or digit_version(TORCH_VERSION) >= digit_version('1.12.0')), (
'PyTorch ZeroRedundancyOptimizer started to support param '
'groups since 1.12.0. Please update your pytorch version to '
'enable this feature, or disable param groups by deleting '
            '`paramwise_cfg` field in the config file.')
optimizer_class = getattr(torch.optim, optimizer_type)
# TODO: Register a DDP communication hook for `overlap_with_ddp=True`.
# Currently only `overlap_with_ddp=False` is supported. For more
# details, please refer to the pytorch's official documentation.
super().__init__(params, optimizer_class, **kwargs)
def state_dict(self):
"""Consolidate `state_dict`s from ranks to save the `state_dict`."""
self.consolidate_state_dict()
state_dict = super().state_dict() if is_main_process() else dict()
return state_dict
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.distributed.rpc import is_available
from mmengine.dist import is_main_process
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
try:
from torch.distributed.optim import \
ZeroRedundancyOptimizer as _ZeroRedundancyOptimizer
except ImportError:
_ZeroRedundancyOptimizer = object
from .builder import OPTIMIZERS
@OPTIMIZERS.register_module()
class ZeroRedundancyOptimizer(_ZeroRedundancyOptimizer):
"""A wrapper class of :class:`ZeroRedundancyOptimizer` that gets a
optimizer type as string.
This class wraps an arbitrary :class:`torch.optim.Optimizer` and shards its
states across ranks in the group as described by ZeRO_. The local optimizer
instance in each rank is only responsible for updating approximately
``1 / world_size`` parameters and hence only needs to keep
``1 / world_size`` optimizer states. After parameters are updated locally,
each rank will broadcast its parameters to all other peers to keep all
model replicas in the same state. ``ZeroRedundancyOptimizer`` can be used
in conjunction with :class:`torch.nn.parallel.DistributedDataParallel` to
reduce per-rank peak memory consumption.
``ZeroRedundancyOptimizer`` uses a sorted-greedy algorithm to pack a number
of parameters at each rank. Each parameter belongs to a single rank and is
not divided among ranks. The partition is arbitrary and might not match the
    parameter registration or usage order.
Warnings:
``ZeroRedundancyOptimizer`` requires PyTorch >= 1.8.
Args:
params (``Iterable``): an ``Iterable`` of :class:`torch.Tensor` s
or :class:`dict` s giving all parameters, which will be sharded
across ranks.
optimizer_type (str): the string of the local optimizer class.
.. _ZeRO: https://arxiv.org/abs/1910.02054
"""
def __init__(self, params, optimizer_type: str, **kwargs):
assert digit_version(TORCH_VERSION) >= digit_version('1.8.0'), (
            '`torch.distributed.optim.ZeroRedundancyOptimizer` is only '
'available when pytorch version >= 1.8.')
assert is_available(), 'torch.distributed.rpc is not available.'
optimizer_class = getattr(torch.optim, optimizer_type)
# TODO: Register a DDP communication hook for `overlap_with_ddp=True`.
# Currently only `overlap_with_ddp=False` is supported. For more
# details, please refer to the pytorch's official documentation.
super().__init__(params, optimizer_class, **kwargs)
def state_dict(self):
"""Consolidate `state_dict`s from ranks to save the `state_dict`."""
self.consolidate_state_dict()
state_dict = super().state_dict() if is_main_process() else dict()
return state_dict
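# Hedged usage sketch (not part of the original module). It assumes a default
# process group has already been initialized (e.g. via
# torch.distributed.init_process_group); the local optimizer is selected by the
# name of a class in torch.optim.
def _example_zero_step():
    import torch.nn as nn
    model = nn.Linear(8, 2)
    optim = ZeroRedundancyOptimizer(model.parameters(), optimizer_type='SGD', lr=0.1)
    loss = model(torch.randn(4, 8)).sum()
    loss.backward()
    optim.step()
    # Consolidated full state dict on the main process, empty dict on other ranks.
    return optim.state_dict()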
|
import pytest
@pytest.mark.parametrize(
"model,expected",
[
("librispeech", ["the", "captain", "shook", "his", "head"]),
("librispeech-3-gram", ["the", "captain", "shook", "his", "head"]),
],
)
def test_decoder_from_pretrained(model, expected, emissions):
from torchaudio.prototype.ctc_decoder import ctc_decoder, download_pretrained_files
pretrained_files = download_pretrained_files(model)
decoder = ctc_decoder(
lexicon=pretrained_files.lexicon,
tokens=pretrained_files.tokens,
lm=pretrained_files.lm,
)
result = decoder(emissions)
assert result[0][0].words == expected
|
import pytest
@pytest.mark.parametrize(
"model,expected",
[
("librispeech", ["the", "captain", "shook", "his", "head"]),
("librispeech-3-gram", ["the", "captain", "shook", "his", "head"]),
],
)
def test_decoder_from_pretrained(model, expected, emissions):
from torchaudio.prototype.ctc_decoder import lexicon_decoder, download_pretrained_files
pretrained_files = download_pretrained_files(model)
decoder = lexicon_decoder(
lexicon=pretrained_files.lexicon,
tokens=pretrained_files.tokens,
lm=pretrained_files.lm,
)
result = decoder(emissions)
assert result[0][0].words == expected
|
"""Pathway reader."""
import json
from typing import List, Optional
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copied from https://github.com/pathwaycom/pathway/blob/main/python/pathway/xpacks/llm/vector_store.py
# to remove dependency on Pathway library when only the client is used.
class _VectorStoreClient:
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
url: Optional[str] = None,
):
"""
A client you can use to query :py:class:`VectorStoreServer`.
Please provide either the `url`, or `host` and `port`.
Args:
        - host: host on which :py:class:`VectorStoreServer` listens
        - port: port on which :py:class:`VectorStoreServer` listens
        - url: url at which :py:class:`VectorStoreServer` listens
"""
err = "Either (`host` and `port`) or `url` must be provided, but not both."
if url is not None:
if host or port:
raise ValueError(err)
self.url = url
else:
if host is None:
raise ValueError(err)
port = port or 80
self.url = f"http://{host}:{port}"
def query(
self, query: str, k: int = 3, metadata_filter: Optional[str] = None
) -> List[dict]:
"""
Perform a query to the vector store and fetch results.
Args:
- query:
- k: number of documents to be returned
- metadata_filter: optional string representing the metadata filtering query
in the JMESPath format. The search will happen only for documents
satisfying this filtering.
"""
data = {"query": query, "k": k}
if metadata_filter is not None:
data["metadata_filter"] = metadata_filter
url = self.url + "/v1/retrieve"
response = requests.post(
url,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
timeout=3,
)
return response.json()
# Make an alias
__call__ = query
def get_vectorstore_statistics(self) -> dict:
"""Fetch basic statistics about the vector store."""
url = self.url + "/v1/statistics"
response = requests.post(
url,
json={},
headers={"Content-Type": "application/json"},
)
return response.json()
def get_input_files(
self,
metadata_filter: Optional[str] = None,
filepath_globpattern: Optional[str] = None,
) -> list:
"""
Fetch information on documents in the vector store.
Args:
metadata_filter: optional string representing the metadata filtering query
in the JMESPath format. The search will happen only for documents
satisfying this filtering.
filepath_globpattern: optional glob pattern specifying which documents
will be searched for this query.
"""
url = self.url + "/v1/inputs"
response = requests.post(
url,
json={
"metadata_filter": metadata_filter,
"filepath_globpattern": filepath_globpattern,
},
headers={"Content-Type": "application/json"},
)
return response.json()
class PathwayReader(BaseReader):
"""
Pathway reader.
Retrieve documents from Pathway data indexing pipeline.
Args:
host (str): The URI where Pathway is currently hosted.
port (str | int): The port number on which Pathway is listening.
See Also:
        llamaindex.retriever.pathway.PathwayRetriever and
llamaindex.retriever.pathway.PathwayVectorServer
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
url: Optional[str] = None,
):
"""Initializing the Pathway reader client."""
self.client = _VectorStoreClient(host, port, url)
def load_data(
self,
query_text: str,
k: Optional[int] = 4,
metadata_filter: Optional[str] = None,
) -> List[Document]:
"""
Load data from Pathway.
Args:
query_text (str): The text to get the closest neighbors of.
k (int): Number of results to return.
metadata_filter (str): Filter to be applied.
Returns:
List[Document]: A list of documents.
"""
results = self.client(query_text, k, metadata_filter)
documents = []
for return_elem in results:
document = Document(
text=return_elem["text"],
extra_info=return_elem["metadata"],
)
documents.append(document)
return documents
|
"""Pathway reader."""
import json
from typing import List, Optional
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
# Copied from https://github.com/pathwaycom/pathway/blob/main/python/pathway/xpacks/llm/vector_store.py
# to remove dependency on Pathway library when only the client is used.
class _VectorStoreClient:
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
url: Optional[str] = None,
):
"""
A client you can use to query :py:class:`VectorStoreServer`.
Please provide either the `url`, or `host` and `port`.
Args:
        - host: host on which :py:class:`VectorStoreServer` listens
        - port: port on which :py:class:`VectorStoreServer` listens
        - url: url at which :py:class:`VectorStoreServer` listens
"""
err = "Either (`host` and `port`) or `url` must be provided, but not both."
if url is not None:
if host or port:
raise ValueError(err)
self.url = url
else:
if host is None:
raise ValueError(err)
port = port or 80
self.url = f"http://{host}:{port}"
def query(
self, query: str, k: int = 3, metadata_filter: Optional[str] = None
) -> List[dict]:
"""
Perform a query to the vector store and fetch results.
Args:
- query:
- k: number of documents to be returned
- metadata_filter: optional string representing the metadata filtering query
in the JMESPath format. The search will happen only for documents
satisfying this filtering.
"""
data = {"query": query, "k": k}
if metadata_filter is not None:
data["metadata_filter"] = metadata_filter
url = self.url + "/v1/retrieve"
response = requests.post(
url,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
timeout=3,
)
return response.json()
# Make an alias
__call__ = query
def get_vectorstore_statistics(self) -> dict:
"""Fetch basic statistics about the vector store."""
url = self.url + "/v1/statistics"
response = requests.post(
url,
json={},
headers={"Content-Type": "application/json"},
)
return response.json()
def get_input_files(
self,
metadata_filter: Optional[str] = None,
filepath_globpattern: Optional[str] = None,
) -> list:
"""
Fetch information on documents in the vector store.
Args:
metadata_filter: optional string representing the metadata filtering query
in the JMESPath format. The search will happen only for documents
satisfying this filtering.
filepath_globpattern: optional glob pattern specifying which documents
will be searched for this query.
"""
url = self.url + "/v1/inputs"
response = requests.post(
url,
json={
"metadata_filter": metadata_filter,
"filepath_globpattern": filepath_globpattern,
},
headers={"Content-Type": "application/json"},
)
return response.json()
class PathwayReader(BaseReader):
"""Pathway reader.
Retrieve documents from Pathway data indexing pipeline.
Args:
host (str): The URI where Pathway is currently hosted.
port (str | int): The port number on which Pathway is listening.
See Also:
        llamaindex.retriever.pathway.PathwayRetriever and
llamaindex.retriever.pathway.PathwayVectorServer
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
url: Optional[str] = None,
):
"""Initializing the Pathway reader client."""
self.client = _VectorStoreClient(host, port, url)
def load_data(
self,
query_text: str,
k: Optional[int] = 4,
metadata_filter: Optional[str] = None,
) -> List[Document]:
"""Load data from Pathway.
Args:
query_text (str): The text to get the closest neighbors of.
k (int): Number of results to return.
metadata_filter (str): Filter to be applied.
Returns:
List[Document]: A list of documents.
"""
results = self.client(query_text, k, metadata_filter)
documents = []
for return_elem in results:
document = Document(
text=return_elem["text"],
extra_info=return_elem["metadata"],
)
documents.append(document)
return documents
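# Hedged usage sketch (not part of the original module): the host, port and
# query below are illustrative and assume a Pathway vector store server is
# already running at that address.
if __name__ == "__main__":
    reader = PathwayReader(host="127.0.0.1", port=8754)
    docs = reader.load_data(query_text="What is Pathway?", k=2)
    for d in docs:
        print(d.text[:80], d.metadata)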
|
"""Tool for the Dataherald Hosted API"""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.dataherald import DataheraldAPIWrapper
class DataheraldTextToSQLInput(BaseModel):
prompt: str = Field(
description="Natural language query to be translated to a SQL query."
)
class DataheraldTextToSQL(BaseTool):
"""Tool that queries using the Dataherald SDK."""
name: str = "dataherald"
description: str = (
"A wrapper around Dataherald. "
"Text to SQL. "
"Input should be a prompt and an existing db_connection_id"
)
api_wrapper: DataheraldAPIWrapper
args_schema: Type[BaseModel] = DataheraldTextToSQLInput
def _run(
self,
prompt: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Dataherald tool."""
return self.api_wrapper.run(prompt)
|
"""Tool for the Dataherald Hosted API"""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.dataherald import DataheraldAPIWrapper
class DataheraldTextToSQLInput(BaseModel):
prompt: str = Field(
description="Natural language query to be translated to a SQL query."
)
class DataheraldTextToSQL(BaseTool): # type: ignore[override, override]
"""Tool that queries using the Dataherald SDK."""
name: str = "dataherald"
description: str = (
"A wrapper around Dataherald. "
"Text to SQL. "
"Input should be a prompt and an existing db_connection_id"
)
api_wrapper: DataheraldAPIWrapper
args_schema: Type[BaseModel] = DataheraldTextToSQLInput
def _run(
self,
prompt: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Dataherald tool."""
return self.api_wrapper.run(prompt)
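# Hedged usage sketch (not part of the original module): the connection id is a
# placeholder and a DATAHERALD_API_KEY is expected in the environment.
if __name__ == "__main__":
    wrapper = DataheraldAPIWrapper(db_connection_id="<your-db-connection-id>")
    text_to_sql = DataheraldTextToSQL(api_wrapper=wrapper)
    print(text_to_sql.run("How many employees are there?"))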
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
import datetime
import uuid
from unittest.mock import MagicMock, patch
from langsmith.schemas import Example
from langchain_core.document_loaders import LangSmithLoader
from langchain_core.documents import Document
def test_init() -> None:
LangSmithLoader(api_key="secret")
EXAMPLES = [
Example(
inputs={"first": {"second": "foo"}},
outputs={"res": "a"},
dataset_id=uuid.uuid4(),
id=uuid.uuid4(),
created_at=datetime.datetime.now(datetime.timezone.utc),
),
Example(
inputs={"first": {"second": "bar"}},
outputs={"res": "b"},
dataset_id=uuid.uuid4(),
id=uuid.uuid4(),
created_at=datetime.datetime.now(datetime.timezone.utc),
),
Example(
inputs={"first": {"second": "baz"}},
outputs={"res": "c"},
dataset_id=uuid.uuid4(),
id=uuid.uuid4(),
created_at=datetime.datetime.now(datetime.timezone.utc),
),
]
@patch("langsmith.Client.list_examples", MagicMock(return_value=iter(EXAMPLES)))
def test_lazy_load() -> None:
loader = LangSmithLoader(
api_key="dummy",
dataset_id="mock",
content_key="first.second",
format_content=(lambda x: x.upper()),
)
expected = []
for example in EXAMPLES:
metadata = {
k: v if not v or isinstance(v, dict) else str(v)
for k, v in example.dict().items()
}
expected.append(
Document(example.inputs["first"]["second"].upper(), metadata=metadata)
if example.inputs
else None
)
actual = list(loader.lazy_load())
assert expected == actual
|
import datetime
import uuid
from unittest.mock import MagicMock, patch
from langsmith.schemas import Example
from langchain_core.document_loaders import LangSmithLoader
from langchain_core.documents import Document
def test_init() -> None:
LangSmithLoader(api_key="secret")
EXAMPLES = [
Example(
inputs={"first": {"second": "foo"}},
outputs={"res": "a"},
dataset_id=uuid.uuid4(),
id=uuid.uuid4(),
created_at=datetime.datetime.now(datetime.timezone.utc),
),
Example(
inputs={"first": {"second": "bar"}},
outputs={"res": "b"},
dataset_id=uuid.uuid4(),
id=uuid.uuid4(),
created_at=datetime.datetime.now(datetime.timezone.utc),
),
Example(
inputs={"first": {"second": "baz"}},
outputs={"res": "c"},
dataset_id=uuid.uuid4(),
id=uuid.uuid4(),
created_at=datetime.datetime.now(datetime.timezone.utc),
),
]
@patch("langsmith.Client.list_examples", MagicMock(return_value=iter(EXAMPLES)))
def test_lazy_load() -> None:
loader = LangSmithLoader(
api_key="dummy",
dataset_id="mock",
content_key="first.second",
format_content=(lambda x: x.upper()),
)
expected = []
for example in EXAMPLES:
metadata = {
k: v if not v or isinstance(v, dict) else str(v)
for k, v in example.dict().items()
}
expected.append(
Document(example.inputs["first"]["second"].upper(), metadata=metadata)
)
actual = list(loader.lazy_load())
assert expected == actual
|
import pytest
from llama_index.core.node_parser.text.semantic_double_merging_splitter import (
SemanticDoubleMergingSplitterNodeParser,
LanguageConfig,
)
from llama_index.core.schema import Document
doc = Document(
text="Warsaw: Warsaw, the capital city of Poland, is a bustling metropolis located on the banks of the Vistula River. "
"It is known for its rich history, vibrant culture, and resilient spirit. Warsaw's skyline is characterized by a mix of historic architecture and modern skyscrapers. "
"The Old Town, with its cobblestone streets and colorful buildings, is a UNESCO World Heritage Site.\n\n"
"Football: Football, also known as soccer, is a popular sport played by millions of people worldwide. "
"It is a team sport that involves two teams of eleven players each. The objective of the game is to score goals by kicking the ball into the opposing team's goal. "
"Football matches are typically played on a rectangular field called a pitch, with goals at each end. "
"The game is governed by a set of rules known as the Laws of the Game. Football is known for its passionate fanbase and intense rivalries between clubs and countries. "
"The FIFA World Cup is the most prestigious international football tournament.\n\n"
"Mathematics: Mathematics is a fundamental discipline that deals with the study of numbers, quantities, and shapes. "
"Its branches include algebra, calculus, geometry, and statistics."
)
doc_same = Document(
text="Krakow is one of the oldest and largest cities in Poland, located in the southern part of the country on the Vistula River. "
* 20
)
try:
splitter = SemanticDoubleMergingSplitterNodeParser(
initial_threshold=0.7,
appending_threshold=0.8,
merging_threshold=0.7,
max_chunk_size=1000,
)
splitter.language_config.load_model()
spacy_available = True
except Exception:
spacy_available = False
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_number_of_returned_nodes() -> None:
nodes = splitter.get_nodes_from_documents([doc])
assert len(nodes) == 2
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_creating_initial_chunks() -> None:
text = doc.text
sentences = splitter.sentence_splitter(text)
initial_chunks = splitter._create_initial_chunks(sentences)
assert len(initial_chunks) == 4
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_config_models() -> None:
with pytest.raises(ValueError):
LanguageConfig(language="polish")
with pytest.raises(ValueError):
LanguageConfig(language="polish", spacy_model="en_core_web_md")
with pytest.raises(ValueError):
LanguageConfig(language="french", spacy_model="en_core_web_md")
with pytest.raises(ValueError):
LanguageConfig(language="empty", spacy_model="empty")
LanguageConfig(language="english", spacy_model="en_core_web_md")
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_1() -> None:
splitter.max_chunk_size = 0
nodes = splitter.get_nodes_from_documents([doc])
# length of each sentence
assert len(nodes) == 13
assert len(nodes[0].get_content()) == 111
assert len(nodes[1].get_content()) == 72
assert len(nodes[2].get_content()) == 91
assert len(nodes[3].get_content()) == 99
assert len(nodes[4].get_content()) == 100
assert len(nodes[5].get_content()) == 66
assert len(nodes[6].get_content()) == 94
assert len(nodes[7].get_content()) == 100
assert len(nodes[8].get_content()) == 69
assert len(nodes[9].get_content()) == 95
assert len(nodes[10].get_content()) == 77
assert len(nodes[11].get_content()) == 114
assert len(nodes[12].get_content()) == 65
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_2() -> None:
splitter.max_chunk_size = 200
nodes = splitter.get_nodes_from_documents([doc])
for node in nodes:
assert len(node.get_content()) < 200
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_3() -> None:
splitter.max_chunk_size = 500
nodes = splitter.get_nodes_from_documents([doc_same])
for node in nodes:
assert len(node.get_content()) < 500
|
import pytest
from llama_index.core.node_parser.text.semantic_double_merging_splitter import (
SemanticDoubleMergingSplitterNodeParser,
LanguageConfig,
)
from llama_index.core.schema import Document
doc = Document(
text="Warsaw: Warsaw, the capital city of Poland, is a bustling metropolis located on the banks of the Vistula River. "
"It is known for its rich history, vibrant culture, and resilient spirit. Warsaw's skyline is characterized by a mix of historic architecture and modern skyscrapers. "
"The Old Town, with its cobblestone streets and colorful buildings, is a UNESCO World Heritage Site.\n\n"
"Football: Football, also known as soccer, is a popular sport played by millions of people worldwide. "
"It is a team sport that involves two teams of eleven players each. The objective of the game is to score goals by kicking the ball into the opposing team's goal. "
"Football matches are typically played on a rectangular field called a pitch, with goals at each end. "
"The game is governed by a set of rules known as the Laws of the Game. Football is known for its passionate fanbase and intense rivalries between clubs and countries. "
"The FIFA World Cup is the most prestigious international football tournament.\n\n"
"Mathematics: Mathematics is a fundamental discipline that deals with the study of numbers, quantities, and shapes. "
"Its branches include algebra, calculus, geometry, and statistics."
)
doc_same = Document(
text="Krakow is one of the oldest and largest cities in Poland, located in the southern part of the country on the Vistula River. "
* 20
)
try:
splitter = SemanticDoubleMergingSplitterNodeParser(
initial_threshold=0.7,
appending_threshold=0.8,
merging_threshold=0.7,
max_chunk_size=1000,
)
splitter.language_config.load_model()
spacy_available = True
except Exception:
spacy_available = False
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_number_of_returned_nodes() -> None:
nodes = splitter.get_nodes_from_documents([doc])
assert len(nodes) == 4
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_creating_initial_chunks() -> None:
text = doc.text
sentences = splitter.sentence_splitter(text)
initial_chunks = splitter._create_initial_chunks(sentences)
assert len(initial_chunks) == 9
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_config_models() -> None:
with pytest.raises(ValueError):
LanguageConfig(language="polish")
with pytest.raises(ValueError):
LanguageConfig(language="polish", spacy_model="en_core_web_md")
with pytest.raises(ValueError):
LanguageConfig(language="french", spacy_model="en_core_web_md")
with pytest.raises(ValueError):
LanguageConfig(language="empty", spacy_model="empty")
LanguageConfig(language="english", spacy_model="en_core_web_md")
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_1() -> None:
splitter.max_chunk_size = 0
nodes = splitter.get_nodes_from_documents([doc])
# length of each sentence
assert len(nodes) == 13
assert len(nodes[0].get_content()) == 111
assert len(nodes[1].get_content()) == 72
assert len(nodes[2].get_content()) == 91
assert len(nodes[3].get_content()) == 99
assert len(nodes[4].get_content()) == 100
assert len(nodes[5].get_content()) == 66
assert len(nodes[6].get_content()) == 94
assert len(nodes[7].get_content()) == 100
assert len(nodes[8].get_content()) == 69
assert len(nodes[9].get_content()) == 95
assert len(nodes[10].get_content()) == 77
assert len(nodes[11].get_content()) == 114
assert len(nodes[12].get_content()) == 65
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_2() -> None:
splitter.max_chunk_size = 200
nodes = splitter.get_nodes_from_documents([doc])
for node in nodes:
assert len(node.get_content()) < 200
@pytest.mark.skipif(not spacy_available, reason="Spacy model not available")
def test_chunk_size_3() -> None:
splitter.max_chunk_size = 500
nodes = splitter.get_nodes_from_documents([doc_same])
for node in nodes:
assert len(node.get_content()) < 500
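# Hedged usage sketch (not part of the original tests): building the splitter
# with an explicit LanguageConfig; assumes the `en_core_web_md` spaCy model has
# been downloaded.
def _example_split(documents):
    config = LanguageConfig(language="english", spacy_model="en_core_web_md")
    parser = SemanticDoubleMergingSplitterNodeParser(
        language_config=config,
        initial_threshold=0.4,
        appending_threshold=0.5,
        merging_threshold=0.5,
        max_chunk_size=5000,
    )
    return parser.get_nodes_from_documents(documents)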
|
from keras.src.api_export import keras_export
from keras.src.optimizers import adam
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.AdamW"])
class AdamW(adam.Adam):
"""Optimizer that implements the AdamW algorithm.
AdamW optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments with an added
method to decay weights per the techniques discussed in the paper,
'Decoupled Weight Decay Regularization' by
[Loshchilov, Hutter et al., 2019](https://arxiv.org/abs/1711.05101).
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the underlying Adam method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates.
Defaults to `0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just
before Section 2.1), not the epsilon in Algorithm 1 of the paper.
Defaults to 1e-7.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond".
Defaults to `False`.
{{base_optimizer_keyword_args}}
References:
- [Loshchilov et al., 2019](https://arxiv.org/abs/1711.05101)
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) for `adam`
- [Reddi et al., 2018](
https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.
"""
def __init__(
self,
learning_rate=0.001,
weight_decay=0.004,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adamw",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
amsgrad=amsgrad,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if self.weight_decay is None:
raise ValueError(
"Argument `weight_decay` must be a float. Received: "
"weight_decay=None"
)
AdamW.__doc__ = AdamW.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
from keras.src.api_export import keras_export
from keras.src.optimizers import adam
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.AdamW"])
class AdamW(adam.Adam):
"""Optimizer that implements the AdamW algorithm.
AdamW optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments with an added
method to decay weights per the techniques discussed in the paper,
'Decoupled Weight Decay Regularization' by
[Loshchilov, Hutter et al., 2019](https://arxiv.org/abs/1711.05101).
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
    the underlying Adam method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates.
Defaults to `0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just
before Section 2.1), not the epsilon in Algorithm 1 of the paper.
Defaults to 1e-7.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond".
Defaults to `False`.
{{base_optimizer_keyword_args}}
References:
- [Loshchilov et al., 2019](https://arxiv.org/abs/1711.05101)
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) for `adam`
- [Reddi et al., 2018](
https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.
"""
def __init__(
self,
learning_rate=0.001,
weight_decay=0.004,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adamw",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
amsgrad=amsgrad,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if self.weight_decay is None:
raise ValueError(
"Argument `weight_decay` must be a float. Received: "
"weight_decay=None"
)
AdamW.__doc__ = AdamW.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
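# Hedged usage sketch (not part of the original module): AdamW plugs in like any
# other Keras optimizer; weight decay is decoupled from the gradient update.
if __name__ == "__main__":
    import numpy as np
    from keras import layers, models

    model = models.Sequential([layers.Dense(4, activation="relu"), layers.Dense(1)])
    model.compile(optimizer=AdamW(learning_rate=1e-3, weight_decay=1e-2), loss="mse")
    x = np.random.rand(32, 8).astype("float32")
    y = np.random.rand(32, 1).astype("float32")
    model.fit(x, y, epochs=1, verbose=0)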
|
_base_ = 'grounding_dino_swin-t_pretrain_obj365.py'
o365v1_od_dataset = dict(
type='ODVGDataset',
data_root='data/objects365v1/',
ann_file='o365v1_train_odvg.json',
label_map_file='o365v1_label_map.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None,
)
flickr30k_dataset = dict(
type='ODVGDataset',
data_root='data/flickr30k_entities/',
ann_file='final_flickr_separateGT_train_vg.json',
label_map_file=None,
data_prefix=dict(img='flickr30k_images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
gqa_dataset = dict(
type='ODVGDataset',
data_root='data/gqa/',
ann_file='final_mixed_train_no_coco_vg.json',
label_map_file=None,
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
v3d_train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7,
                    # following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(
type='RandomSamplingNegPos',
tokenizer_name=_base_.lang_model_name,
num_sample_negative=85,
# change this
label_map_file='data/V3Det/annotations/v3det_2023_v1_label_map.json',
max_tokens=256),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities', 'tokens_positive', 'dataset_mode'))
]
v3det_dataset = dict(
type='ODVGDataset',
data_root='data/V3Det/',
ann_file='annotations/v3det_2023_v1_train_od.json',
label_map_file='annotations/v3det_2023_v1_label_map.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False),
need_text=False, # change this
pipeline=v3d_train_pipeline,
return_classes=True,
backend_args=None)
grit_dataset = dict(
type='ODVGDataset',
data_root='grit_processed/',
ann_file='grit20m_vg.json',
label_map_file=None,
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
train_dataloader = dict(
sampler=dict(
_delete_=True,
type='CustomSampleSizeSampler',
dataset_size=[-1, -1, -1, -1, 500000]),
dataset=dict(datasets=[
o365v1_od_dataset, flickr30k_dataset, gqa_dataset, v3det_dataset,
grit_dataset
]))
|
_base_ = 'grounding_dino_swin-t_pretrain_obj365.py'
o365v1_od_dataset = dict(
type='ODVGDataset',
data_root='data/objects365v1/',
ann_file='o365v1_train_odvg.jsonl',
label_map_file='o365v1_label_map.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None,
)
flickr30k_dataset = dict(
type='ODVGDataset',
data_root='data/flickr30k_entities/',
ann_file='final_flickr_separateGT_train_vg.json',
label_map_file=None,
data_prefix=dict(img='flickr30k_images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
gqa_dataset = dict(
type='ODVGDataset',
data_root='data/gqa/',
ann_file='final_mixed_train_no_coco_vg.json',
label_map_file=None,
data_prefix=dict(img='images/'),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
v3d_train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7,
                    # following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(
type='RandomSamplingNegPos',
tokenizer_name=_base_.lang_model_name,
num_sample_negative=85,
# change this
label_map_file='data/V3Det/annotations/v3det_2023_v1_label_map.json',
max_tokens=256),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities', 'tokens_positive', 'dataset_mode'))
]
v3det_dataset = dict(
type='ODVGDataset',
data_root='data/V3Det/',
ann_file='annotations/v3det_2023_v1_train_od.json',
label_map_file='annotations/v3det_2023_v1_label_map.json',
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False),
need_text=False, # change this
pipeline=v3d_train_pipeline,
return_classes=True,
backend_args=None)
grit_dataset = dict(
type='ODVGDataset',
data_root='grit_processed/',
ann_file='grit20m_vg.json',
label_map_file=None,
data_prefix=dict(img=''),
filter_cfg=dict(filter_empty_gt=False),
pipeline=_base_.train_pipeline,
return_classes=True,
backend_args=None)
train_dataloader = dict(
sampler=dict(
_delete_=True,
type='CustomSampleSizeSampler',
dataset_size=[-1, -1, -1, -1, 500000]),
dataset=dict(datasets=[
o365v1_od_dataset, flickr30k_dataset, gqa_dataset, v3det_dataset,
grit_dataset
]))
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Optional
import spacy
from docarray import DocumentArray
from jina import Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by Spacy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: str = '@r',
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
        :param traversal_paths: fallback traversal path used when no traversal path is sent in the request
        :param batch_size: fallback batch size used when no batch size is sent in the request
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': '@r', 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
trav_path = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
docs_batch_generator = DocumentArray(
filter(
lambda x: bool(x.text),
docs[trav_path],
)
            ).batch(batch_size=batch_size)
for document_batch in docs_batch_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by Spacy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
        :param traversal_paths: fallback traversal path used when no traversal path is sent in the request
        :param batch_size: fallback batch size used when no batch size is sent in the request
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.batch_size)
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=batch_size,
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
|
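A minimal usage sketch for the SpacyTextEncoder defined above, assuming a Jina 3.x runtime in which the executor class is importable and the en_core_web_sm model can be downloaded; the endpoint name, traversal path value, and batch size are illustrative only.
from docarray import Document, DocumentArray
from jina import Flow

# Hypothetical wiring: serve the SpacyTextEncoder defined above in a local Flow
# and pass the runtime `parameters` that its encode() docstring describes.
with Flow().add(uses=SpacyTextEncoder, uses_with={'model_name': 'en_core_web_sm'}) as f:
    docs = f.post(
        on='/encode',
        inputs=DocumentArray([Document(text='hello world')]),
        parameters={'traversal_paths': '@r', 'batch_size': 8},
    )
    # en_core_web_sm typically yields 96-dimensional tok2vec vectors
    print(docs[0].embedding.shape)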
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[ImageDoc]
class Middle(BaseDocument):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[ImageDoc]
middle: Optional[Middle]
da: DocumentArray[Inner]
doc = Outer(
img=ImageDoc(),
middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc())),
da=DocumentArray[Inner]([Inner(img=ImageDoc(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: ImageDoc
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
|
from typing import Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.helper import (
_access_path_dict_to_nested_dict,
_access_path_to_dict,
_dict_to_access_paths,
_is_access_path_valid,
_update_nested_dicts,
)
@pytest.fixture()
def nested_doc():
class Inner(BaseDocument):
img: Optional[Image]
class Middle(BaseDocument):
img: Optional[Image]
inner: Optional[Inner]
class Outer(BaseDocument):
img: Optional[Image]
middle: Optional[Middle]
da: DocumentArray[Inner]
doc = Outer(
img=Image(),
middle=Middle(img=Image(), inner=Inner(img=Image())),
da=DocumentArray[Inner]([Inner(img=Image(url='test.png'))]),
)
return doc
def test_is_access_path_valid(nested_doc):
assert _is_access_path_valid(nested_doc.__class__, 'img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle__inner__img')
assert _is_access_path_valid(nested_doc.__class__, 'middle')
assert _is_access_path_valid(nested_doc.__class__, 'da__img__url')
def test_is_access_path_not_valid(nested_doc):
assert not _is_access_path_valid(nested_doc.__class__, 'inner')
assert not _is_access_path_valid(nested_doc.__class__, 'some__other__path')
assert not _is_access_path_valid(nested_doc.__class__, 'middle.inner')
def test_get_access_paths():
class Painting(BaseDocument):
title: str
img: Image
access_paths = Painting._get_access_paths()
assert access_paths == [
'id',
'title',
'img__id',
'img__url',
'img__tensor',
'img__embedding',
'img__bytes',
]
def test_dict_to_access_paths():
d = {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
casted = _dict_to_access_paths(d)
assert casted == {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
def test_access_path_to_dict():
access_path = 'a__b__c__d__e'
value = 1
result = {'a': {'b': {'c': {'d': {'e': value}}}}}
assert _access_path_to_dict(access_path, value) == result
def test_access_path_dict_to_nested_dict():
d = {
'a0__b0__c0': 0,
'a0__b1__c0': 1,
'a1__b0__c0': 2,
'a1__b0__c1': 3,
'a1__b1': 4,
}
casted = _access_path_dict_to_nested_dict(d)
assert casted == {
'a0': {'b0': {'c0': 0}, 'b1': {'c0': 1}},
'a1': {'b0': {'c0': 2, 'c1': 3}, 'b1': 4},
}
def test_update_nested_dict():
d1 = {'text': 'hello', 'image': {'tensor': None}}
d2 = {'image': {'url': 'some.png'}}
_update_nested_dicts(d1, d2)
assert d1 == {'text': 'hello', 'image': {'tensor': None, 'url': 'some.png'}}
|
import os
import urllib
import numpy as np
import PIL
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.proto
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load_pil(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
img = url.load_pil()
assert isinstance(img, PIL.Image.Image)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_bytes()
assert isinstance(_bytes, bytes)
img = Image.frombytes(mode='1', size=(224, 224), data=_bytes)
assert isinstance(img, Image.Image)
@pytest.mark.parametrize(
'path_to_img',
[*IMAGE_PATHS.values(), REMOTE_JPG],
)
def test_validation(path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import os
import urllib
import numpy as np
import PIL
import pytest
from PIL import Image
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.proto
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(ImageUrl)
def test_dump_json():
url = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load_pil(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
img = url.load_pil()
assert isinstance(img, PIL.Image.Image)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_bytes()
assert isinstance(_bytes, bytes)
img = Image.frombytes(mode='1', size=(224, 224), data=_bytes)
assert isinstance(img, Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug'
]
|
"""**Text Splitters** are classes for splitting text.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter # Example: CharacterTextSplitter
RecursiveCharacterTextSplitter --> <name>TextSplitter
Note: **MarkdownHeaderTextSplitter** and **HTMLHeaderTextSplitter** do not derive from TextSplitter.
**Main helpers:**
.. code-block::
Document, Tokenizer, Language, LineType, HeaderType
""" # noqa: E501
from langchain_text_splitters.base import (
Language,
TextSplitter,
Tokenizer,
TokenTextSplitter,
split_text_on_tokens,
)
from langchain_text_splitters.character import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain_text_splitters.html import (
ElementType,
HTMLHeaderTextSplitter,
HTMLSectionSplitter,
HTMLSemanticPreservingSplitter,
)
from langchain_text_splitters.json import RecursiveJsonSplitter
from langchain_text_splitters.jsx import JSFrameworkTextSplitter
from langchain_text_splitters.konlpy import KonlpyTextSplitter
from langchain_text_splitters.latex import LatexTextSplitter
from langchain_text_splitters.markdown import (
ExperimentalMarkdownSyntaxTextSplitter,
HeaderType,
LineType,
MarkdownHeaderTextSplitter,
MarkdownTextSplitter,
)
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.python import PythonCodeTextSplitter
from langchain_text_splitters.sentence_transformers import (
SentenceTransformersTokenTextSplitter,
)
from langchain_text_splitters.spacy import SpacyTextSplitter
__all__ = [
"TokenTextSplitter",
"TextSplitter",
"Tokenizer",
"Language",
"RecursiveCharacterTextSplitter",
"RecursiveJsonSplitter",
"LatexTextSplitter",
"JSFrameworkTextSplitter",
"PythonCodeTextSplitter",
"KonlpyTextSplitter",
"SpacyTextSplitter",
"NLTKTextSplitter",
"split_text_on_tokens",
"SentenceTransformersTokenTextSplitter",
"ElementType",
"HeaderType",
"LineType",
"HTMLHeaderTextSplitter",
"HTMLSectionSplitter",
"HTMLSemanticPreservingSplitter",
"MarkdownHeaderTextSplitter",
"MarkdownTextSplitter",
"CharacterTextSplitter",
"ExperimentalMarkdownSyntaxTextSplitter",
]
|
"""**Text Splitters** are classes for splitting text.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> TextSplitter --> <name>TextSplitter # Example: CharacterTextSplitter
RecursiveCharacterTextSplitter --> <name>TextSplitter
Note: **MarkdownHeaderTextSplitter** and **HTMLHeaderTextSplitter** do not derive from TextSplitter.
**Main helpers:**
.. code-block::
Document, Tokenizer, Language, LineType, HeaderType
""" # noqa: E501
from langchain_text_splitters.base import (
Language,
TextSplitter,
Tokenizer,
TokenTextSplitter,
split_text_on_tokens,
)
from langchain_text_splitters.character import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain_text_splitters.html import (
ElementType,
HTMLHeaderTextSplitter,
HTMLSectionSplitter,
HTMLSemanticPreservingSplitter,
)
from langchain_text_splitters.json import RecursiveJsonSplitter
from langchain_text_splitters.konlpy import KonlpyTextSplitter
from langchain_text_splitters.latex import LatexTextSplitter
from langchain_text_splitters.markdown import (
ExperimentalMarkdownSyntaxTextSplitter,
HeaderType,
LineType,
MarkdownHeaderTextSplitter,
MarkdownTextSplitter,
)
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.python import PythonCodeTextSplitter
from langchain_text_splitters.sentence_transformers import (
SentenceTransformersTokenTextSplitter,
)
from langchain_text_splitters.spacy import SpacyTextSplitter
__all__ = [
"TokenTextSplitter",
"TextSplitter",
"Tokenizer",
"Language",
"RecursiveCharacterTextSplitter",
"RecursiveJsonSplitter",
"LatexTextSplitter",
"PythonCodeTextSplitter",
"KonlpyTextSplitter",
"SpacyTextSplitter",
"NLTKTextSplitter",
"split_text_on_tokens",
"SentenceTransformersTokenTextSplitter",
"ElementType",
"HeaderType",
"LineType",
"HTMLHeaderTextSplitter",
"HTMLSectionSplitter",
"HTMLSemanticPreservingSplitter",
"MarkdownHeaderTextSplitter",
"MarkdownTextSplitter",
"CharacterTextSplitter",
"ExperimentalMarkdownSyntaxTextSplitter",
]
|
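A short sketch of the class hierarchy the module docstring above describes, assuming the langchain-text-splitters package is installed; the sample text and chunk sizes are arbitrary.
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter

# Concrete splitters derive from TextSplitter, as the hierarchy note states.
splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
assert isinstance(splitter, TextSplitter)

chunks = splitter.split_text(
    "Text splitters break long documents into overlapping chunks for retrieval."
)
print(len(chunks), chunks[0])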
"""Test for Serializable base class"""
import json
import os
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from pydantic import ConfigDict, Field, model_validator
class Person(Serializable):
secret: str
you_can_see_me: str = "hello"
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def lc_secrets(self) -> dict[str, str]:
return {"secret": "SECRET"}
@property
def lc_attributes(self) -> dict[str, str]:
return {"you_can_see_me": self.you_can_see_me}
class SpecialPerson(Person):
another_secret: str
another_visible: str = "bye"
@classmethod
def get_lc_namespace(cls) -> list[str]:
return ["my", "special", "namespace"]
# Gets merged with parent class's secrets
@property
def lc_secrets(self) -> dict[str, str]:
return {"another_secret": "ANOTHER_SECRET"}
# Gets merged with parent class's attributes
@property
def lc_attributes(self) -> dict[str, str]:
return {"another_visible": self.another_visible}
class NotSerializable:
pass
def test_person(snapshot: Any) -> None:
p = Person(secret="parrot party") # noqa: S106
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret="Wooo", secret="Hmm") # noqa: S106
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"]
assert SpecialPerson.lc_id() == ["my", "special", "namespace", "SpecialPerson"]
def test_typeerror() -> None:
assert (
dumps({(1, 2): 3})
== """{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], "repr": "{(1, 2): 3}"}""" # noqa: E501
)
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="parrot party") # noqa: S106
assert dumps(person, separators=(",", ":")) == snapshot
def test_person_with_invalid_kwargs() -> None:
person = Person(secret="parrot party") # noqa: S106
with pytest.raises(TypeError):
dumps(person, invalid_kwarg="hello")
class TestClass(Serializable):
my_favorite_secret: str = Field(alias="my_favorite_secret_alias")
my_other_secret: str = Field()
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def get_from_env(cls, values: dict) -> Any:
"""Get the values from the environment."""
if "my_favorite_secret" not in values:
values["my_favorite_secret"] = os.getenv("MY_FAVORITE_SECRET")
if "my_other_secret" not in values:
values["my_other_secret"] = os.getenv("MY_OTHER_SECRET")
return values
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
return ["my", "special", "namespace"]
@property
def lc_secrets(self) -> dict[str, str]:
return {
"my_favorite_secret": "MY_FAVORITE_SECRET",
"my_other_secret": "MY_OTHER_SECRET",
}
def test_aliases_hidden() -> None:
test_class = TestClass(
my_favorite_secret="hello", # noqa: S106 # type: ignore[call-arg]
my_other_secret="world", # noqa: S106
) # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
# Check while patching the os environment
with patch.dict(
os.environ, {"MY_FAVORITE_SECRET": "hello", "MY_OTHER_SECRET": "world"}
):
test_class = TestClass() # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
# Check by alias
test_class = TestClass(
my_favorite_secret_alias="hello", # noqa: S106
my_other_secret="parrot party", # noqa: S106
)
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
|
"""Test for Serializable base class"""
import json
import os
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from pydantic import ConfigDict, Field, model_validator
class Person(Serializable):
secret: str
you_can_see_me: str = "hello"
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def lc_secrets(self) -> dict[str, str]:
return {"secret": "SECRET"}
@property
def lc_attributes(self) -> dict[str, str]:
return {"you_can_see_me": self.you_can_see_me}
class SpecialPerson(Person):
another_secret: str
another_visible: str = "bye"
@classmethod
def get_lc_namespace(cls) -> list[str]:
return ["my", "special", "namespace"]
# Gets merged with parent class's secrets
@property
def lc_secrets(self) -> dict[str, str]:
return {"another_secret": "ANOTHER_SECRET"}
# Gets merged with parent class's attributes
@property
def lc_attributes(self) -> dict[str, str]:
return {"another_visible": self.another_visible}
class NotSerializable:
pass
def test_person(snapshot: Any) -> None:
p = Person(secret="hello")
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret="Wooo", secret="Hmm")
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"]
assert SpecialPerson.lc_id() == ["my", "special", "namespace", "SpecialPerson"]
def test_typeerror() -> None:
assert (
dumps({(1, 2): 3})
== """{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], "repr": "{(1, 2): 3}"}""" # noqa: E501
)
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="hello")
assert dumps(person, separators=(",", ":")) == snapshot
def test_person_with_invalid_kwargs() -> None:
person = Person(secret="hello")
with pytest.raises(TypeError):
dumps(person, invalid_kwarg="hello")
class TestClass(Serializable):
my_favorite_secret: str = Field(alias="my_favorite_secret_alias")
my_other_secret: str = Field()
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def get_from_env(cls, values: dict) -> Any:
"""Get the values from the environment."""
if "my_favorite_secret" not in values:
values["my_favorite_secret"] = os.getenv("MY_FAVORITE_SECRET")
if "my_other_secret" not in values:
values["my_other_secret"] = os.getenv("MY_OTHER_SECRET")
return values
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
return ["my", "special", "namespace"]
@property
def lc_secrets(self) -> dict[str, str]:
return {
"my_favorite_secret": "MY_FAVORITE_SECRET",
"my_other_secret": "MY_OTHER_SECRET",
}
def test_aliases_hidden() -> None:
test_class = TestClass(my_favorite_secret="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
# Check while patching the os environment
with patch.dict(
os.environ, {"MY_FAVORITE_SECRET": "hello", "MY_OTHER_SECRET": "world"}
):
test_class = TestClass() # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
# Check by alias
test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world")
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
|
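A hedged illustration of the secret masking the tests above exercise, assuming the Person class defined above is importable; the exact snapshot text depends on the langchain_core version.
import json

from langchain_core.load.dump import dumps

person = Person(secret="parrot party")
serialized = json.loads(dumps(person))
# Secret fields are replaced by a {"lc": 1, "type": "secret", "id": ["SECRET"]}
# marker, while lc_attributes such as you_can_see_me stay readable.
assert serialized["kwargs"]["secret"] == {"lc": 1, "type": "secret", "id": ["SECRET"]}
assert serialized["kwargs"]["you_can_see_me"] == "hello"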
import pytest
from importlib.util import find_spec
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
no_packages = (
find_spec("psycopg2") is None
or find_spec("sqlalchemy") is None
or find_spec("asyncpg") is None
)
def test_class():
names_of_base_classes = [b.__name__ for b in PostgresKVStore.__mro__]
assert BaseKVStore.__name__ in names_of_base_classes
@pytest.mark.skipif(
    no_packages, reason="asyncpg, psycopg2-binary and sqlalchemy not installed"
)
def test_initialization():
errors = []
try:
pgstore1 = PostgresKVStore(table_name="mytable")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore2 = PostgresKVStore(
table_name="mytable", connection_string="connection_string"
)
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore3 = PostgresKVStore(
table_name="mytable", async_connection_string="async_connection_string"
)
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore4 = PostgresKVStore(
table_name="mytable",
connection_string="connection_string",
async_connection_string="async_connection_string",
)
errors.append(0)
except ValueError:
errors.append(1)
assert sum(errors) == 3
assert pgstore4._engine is None
assert pgstore4._async_engine is None
|
import pytest
from importlib.util import find_spec
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
no_packages = find_spec("psycopg2") is None or find_spec("sqlalchemy") is None or find_spec("asyncpg") is None
def test_class():
names_of_base_classes = [b.__name__ for b in PostgresKVStore.__mro__]
assert BaseKVStore.__name__ in names_of_base_classes
@pytest.mark.skipif(
    no_packages, reason="asyncpg, psycopg2-binary and sqlalchemy not installed"
)
def test_initialization():
errors = []
try:
pgstore1 = PostgresKVStore(table_name="mytable")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore2 = PostgresKVStore(table_name="mytable", connection_string="connection_string")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore3 = PostgresKVStore(table_name="mytable", async_connection_string="async_connection_string")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore4 = PostgresKVStore(table_name="mytable", connection_string="connection_string", async_connection_string="async_connection_string")
errors.append(0)
except ValueError:
errors.append(1)
assert sum(errors) == 3
assert pgstore4._engine is None
assert pgstore4._async_engine is None
|
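A sketch of the only construction the test above expects to succeed, assuming psycopg2-binary, asyncpg and sqlalchemy are installed; both connection strings are placeholders.
from llama_index.storage.kvstore.postgres import PostgresKVStore

# Placeholder DSNs; supplying only one of the two raises ValueError, as the test shows.
store = PostgresKVStore(
    table_name="mytable",
    connection_string="postgresql+psycopg2://user:pass@localhost:5432/db",
    async_connection_string="postgresql+asyncpg://user:pass@localhost:5432/db",
)
# Engines are created lazily, so both stay None until first use.
assert store._engine is None and store._async_engine is None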
import re
from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
def _is_openai_data_block(block: dict) -> bool:
"""Check if the block contains multimodal data in OpenAI Chat Completions format."""
if block.get("type") == "image_url":
if (
(set(block.keys()) <= {"type", "image_url", "detail"})
and (image_url := block.get("image_url"))
and isinstance(image_url, dict)
):
url = image_url.get("url")
if isinstance(url, str):
return True
elif block.get("type") == "file":
if (file := block.get("file")) and isinstance(file, dict):
file_data = file.get("file_data")
if isinstance(file_data, str):
return True
elif block.get("type") == "input_audio":
if (input_audio := block.get("input_audio")) and isinstance(input_audio, dict):
audio_data = input_audio.get("data")
audio_format = input_audio.get("format")
if isinstance(audio_data, str) and isinstance(audio_format, str):
return True
else:
return False
return False
def _parse_data_uri(uri: str) -> Optional[dict]:
"""Parse a data URI into its components. If parsing fails, return None.
Example:
.. code-block:: python
data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)
assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
"""
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, uri)
if match is None:
return None
return {
"source_type": "base64",
"data": match.group("data"),
"mime_type": match.group("mime_type"),
}
def _convert_openai_format_to_data_block(block: dict) -> dict:
"""Convert OpenAI image content block to standard data content block.
    If parsing fails, the block is passed through unchanged.
Args:
block: The OpenAI image content block to convert.
Returns:
The converted standard data content block.
"""
if block["type"] == "image_url":
parsed = _parse_data_uri(block["image_url"]["url"])
if parsed is not None:
parsed["type"] = "image"
return parsed
return block
if block["type"] == "file":
parsed = _parse_data_uri(block["file"]["file_data"])
if parsed is not None:
parsed["type"] = "file"
if filename := block["file"].get("filename"):
parsed["filename"] = filename
return parsed
return block
if block["type"] == "input_audio":
data = block["input_audio"].get("data")
audio_format = block["input_audio"].get("format")
if data and audio_format:
return {
"type": "audio",
"source_type": "base64",
"data": data,
"mime_type": f"audio/{audio_format}",
}
return block
return block
def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
"""Extend support for message formats.
Chat models implement support for images in OpenAI Chat Completions format, as well
as other multimodal data as standard data blocks. This function extends support to
audio and file data in OpenAI Chat Completions format by converting them to standard
data blocks.
"""
formatted_messages = []
for message in messages:
formatted_message = message
if isinstance(message.content, list):
for idx, block in enumerate(message.content):
if (
isinstance(block, dict)
# Subset to (PDF) files and audio, as most relevant chat models
# support images in OAI format (and some may not yet support the
# standard data block format)
and block.get("type") in ("file", "input_audio")
and _is_openai_data_block(block)
):
if formatted_message is message:
formatted_message = message.model_copy()
# Also shallow-copy content
formatted_message.content = list(formatted_message.content)
formatted_message.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
_convert_openai_format_to_data_block(block)
)
formatted_messages.append(formatted_message)
return formatted_messages
|
import re
from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
def _is_openai_data_block(block: dict) -> bool:
"""Check if the block contains multimodal data in OpenAI Chat Completions format."""
if block.get("type") == "image_url":
if (
(set(block.keys()) <= {"type", "image_url", "detail"})
and (image_url := block.get("image_url"))
and isinstance(image_url, dict)
):
url = image_url.get("url")
if isinstance(url, str):
return True
elif block.get("type") == "file":
if (file := block.get("file")) and isinstance(file, dict):
file_data = file.get("file_data")
if isinstance(file_data, str):
return True
elif block.get("type") == "input_audio":
if (input_audio := block.get("input_audio")) and isinstance(input_audio, dict):
audio_data = input_audio.get("data")
audio_format = input_audio.get("format")
if isinstance(audio_data, str) and isinstance(audio_format, str):
return True
else:
return False
return False
def _parse_data_uri(uri: str) -> Optional[dict]:
"""Parse a data URI into its components. If parsing fails, return None.
Example:
.. code-block:: python
data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)
assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
"""
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, uri)
if match is None:
return None
return {
"source_type": "base64",
"data": match.group("data"),
"mime_type": match.group("mime_type"),
}
def _convert_openai_format_to_data_block(block: dict) -> dict:
"""Convert OpenAI image content block to standard data content block.
    If parsing fails, the block is passed through unchanged.
Args:
block: The OpenAI image content block to convert.
Returns:
The converted standard data content block.
"""
if block["type"] == "image_url":
parsed = _parse_data_uri(block["image_url"]["url"])
if parsed is not None:
parsed["type"] = "image"
return parsed
return block
if block["type"] == "file":
parsed = _parse_data_uri(block["file"]["file_data"])
if parsed is not None:
parsed["type"] = "file"
if filename := block["file"].get("filename"):
parsed["filename"] = filename
return parsed
return block
if block["type"] == "input_audio":
data = block["input_audio"].get("data")
format = block["input_audio"].get("format")
if data and format:
return {
"type": "audio",
"source_type": "base64",
"data": data,
"mime_type": f"audio/{format}",
}
return block
return block
def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
"""Extend support for message formats.
Chat models implement support for images in OpenAI Chat Completions format, as well
as other multimodal data as standard data blocks. This function extends support to
audio and file data in OpenAI Chat Completions format by converting them to standard
data blocks.
"""
formatted_messages = []
for message in messages:
formatted_message = message
if isinstance(message.content, list):
for idx, block in enumerate(message.content):
if (
isinstance(block, dict)
# Subset to (PDF) files and audio, as most relevant chat models
# support images in OAI format (and some may not yet support the
# standard data block format)
and block.get("type") in ("file", "input_audio")
and _is_openai_data_block(block)
):
if formatted_message is message:
formatted_message = message.model_copy()
# Also shallow-copy content
formatted_message.content = list(formatted_message.content)
formatted_message.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
_convert_openai_format_to_data_block(block)
)
formatted_messages.append(formatted_message)
return formatted_messages
|
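A brief example of the conversion the helpers above perform on an OpenAI-style audio block; the base64 payload is a truncated placeholder.
# Assumes the private helpers defined above are available in the same module.
openai_block = {
    "type": "input_audio",
    "input_audio": {"data": "UklGRiQAAABXQVZF...", "format": "wav"},
}

assert _is_openai_data_block(openai_block)
converted = _convert_openai_format_to_data_block(openai_block)
# -> {"type": "audio", "source_type": "base64",
#     "data": "UklGRiQAAABXQVZF...", "mime_type": "audio/wav"}
print(converted["mime_type"])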
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from ...paddle_image import ImagePaddlehubEncoder
@pytest.mark.parametrize('arr_in', [
(np.ones((3, 224, 224), dtype=np.float32)),
(np.ones((3, 100, 100), dtype=np.float32)),
(np.ones((3, 50, 40), dtype=np.float32))
])
def test_paddle_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (2048,)
def test_paddle_batch():
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(Document(blob=np.ones((3, 224, 224), dtype=np.float32)) for _ in range(25)),
return_results=True
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (2048,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(pytest.lazy_fixture('docs_with_chunk_blobs'), [['r', 0], ['c', 10], ['cc', 0]], 'c'),
(pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [['r', 0], ['c', 0], ['cc', 10]], 'cc')
]
)
def test_traversal_path(docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True
)
for path, count in docs_per_path:
assert len(DocumentArray(results[0].docs).traverse_flat([path]).get_attributes('embedding')) == count
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from jinahub.encoder.paddle_image import ImagePaddlehubEncoder
@pytest.mark.parametrize('arr_in', [
(np.ones((3, 224, 224), dtype=np.float32)),
(np.ones((3, 100, 100), dtype=np.float32)),
(np.ones((3, 50, 40), dtype=np.float32))
])
def test_paddle_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (2048,)
def test_paddle_batch():
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(Document(blob=np.ones((3, 224, 224), dtype=np.float32)) for _ in range(25)),
return_results=True
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (2048,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(pytest.lazy_fixture('docs_with_chunk_blobs'), [['r', 0], ['c', 10], ['cc', 0]], 'c'),
(pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [['r', 0], ['c', 0], ['cc', 10]], 'cc')
]
)
def test_traversal_path(docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str):
flow = Flow().add(uses=ImagePaddlehubEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True
)
for path, count in docs_per_path:
assert len(DocumentArray(results[0].docs).traverse_flat([path]).get_attributes('embedding')) == count
|
from operator import itemgetter
from typing import Sequence, Iterable
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _del_doc_by_id(self, _id: str):
self._sql(f'DELETE FROM {self._table_name} WHERE doc_id=?', (_id,))
self._save_offset2ids()
self._commit()
def _set_doc_by_id(self, _id: str, value: 'Document'):
self._sql(
f'UPDATE {self._table_name} SET serialized_value=?, doc_id=? WHERE doc_id=?',
(value, value.id, _id),
)
self._commit()
def _get_doc_by_id(self, id: str) -> 'Document':
r = self._sql(
f'SELECT serialized_value FROM {self._table_name} WHERE doc_id = ?', (id,)
)
res = r.fetchone()
if res is None:
raise KeyError(f'Can not find Document with id=`{id}`')
return res[0]
# essentials end here
# now start the optimized bulk methods
def _get_docs_by_offsets(self, offsets: Sequence[int]) -> Iterable['Document']:
ids = [self._offset2ids.get_id(offset) for offset in offsets]
return self._get_docs_by_ids(ids)
def _clear_storage(self):
self._sql(f'DELETE FROM {self._table_name}')
self._commit()
def _del_docs_by_ids(self, ids: str) -> Iterable['Document']:
self._sql(
f"DELETE FROM {self._table_name} WHERE doc_id in ({','.join(['?'] * len(ids))})",
ids,
)
self._save_offset2ids()
self._commit()
def _load_offset2ids(self):
if self._list_like:
r = self._sql(
f"SELECT doc_id FROM {self._table_name} ORDER BY item_order",
)
self._offset2ids = Offset2ID(
list(map(itemgetter(0), r)), list_like=self._list_like
)
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
for offset, doc_id in enumerate(self._offset2ids):
self._sql(
f"""
UPDATE {self._table_name} SET item_order = ? WHERE {self._table_name}.doc_id = ?
""",
(offset, doc_id),
)
self._commit()
def _del_docs(self, ids):
super()._del_docs(ids)
self._save_offset2ids()
def _del_doc_by_offset(self, offset: int):
super()._del_doc_by_offset(offset)
self._save_offset2ids()
|
from operator import itemgetter
from typing import Sequence, Iterable
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _del_doc_by_id(self, _id: str):
self._sql(f'DELETE FROM {self._table_name} WHERE doc_id=?', (_id,))
self._save_offset2ids()
self._commit()
def _set_doc_by_id(self, _id: str, value: 'Document'):
self._sql(
f'UPDATE {self._table_name} SET serialized_value=?, doc_id=? WHERE doc_id=?',
(value, value.id, _id),
)
self._commit()
def _get_doc_by_id(self, id: str) -> 'Document':
r = self._sql(
f'SELECT serialized_value FROM {self._table_name} WHERE doc_id = ?', (id,)
)
res = r.fetchone()
if res is None:
raise KeyError(f'Can not find Document with id=`{id}`')
return res[0]
# essentials end here
# now start the optimized bulk methods
def _get_docs_by_offsets(self, offsets: Sequence[int]) -> Iterable['Document']:
ids = [self._offset2ids.get_id(offset) for offset in offsets]
return self._get_docs_by_ids(ids)
def _clear_storage(self):
self._sql(f'DELETE FROM {self._table_name}')
self._commit()
def _del_docs_by_ids(self, ids: str) -> Iterable['Document']:
self._sql(
f"DELETE FROM {self._table_name} WHERE doc_id in ({','.join(['?'] * len(ids))})",
ids,
)
self._save_offset2ids()
self._commit()
def _load_offset2ids(self):
r = self._sql(
f"SELECT doc_id FROM {self._table_name} ORDER BY item_order",
)
self._offset2ids = Offset2ID(list(map(itemgetter(0), r)))
def _save_offset2ids(self):
for offset, doc_id in enumerate(self._offset2ids):
self._sql(
f"""
UPDATE {self._table_name} SET item_order = ? WHERE {self._table_name}.doc_id = ?
""",
(offset, doc_id),
)
self._commit()
def _del_docs(self, ids):
super()._del_docs(ids)
self._save_offset2ids()
def _del_doc_by_offset(self, offset: int):
super()._del_doc_by_offset(offset)
self._save_offset2ids()
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.randn(size=(1000, 2)),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
        doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from docarray import BaseDocument
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDocument):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.randn(size=(1000, 2)),
)
doc_1.audio_tensor.save(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save(file_path='path/to/file_2.wav')
        doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
"""
...
|
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
launch_darkly_sdk_key: str = Field(
default="",
description="The Launch Darkly SDK key",
validation_alias="LAUNCH_DARKLY_SDK_KEY",
)
model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")
SETTINGS = Settings()
|
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
launch_darkly_sdk_key: str = Field(
default="",
description="The Launch Darkly SDK key",
validation_alias="LAUNCH_DARKLY_SDK_KEY"
)
model_config = SettingsConfigDict(case_sensitive=True, extra="ignore")
SETTINGS = Settings()
|
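A brief sketch of how the validation alias above is resolved from the environment, assuming pydantic-settings v2; the key value is a placeholder.
import os

# Hypothetical check: the env var named by validation_alias populates the field.
os.environ["LAUNCH_DARKLY_SDK_KEY"] = "sdk-key-placeholder"
assert Settings().launch_darkly_sdk_key == "sdk-key-placeholder"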
"""Mixture modeling algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bayesian_mixture import BayesianGaussianMixture
from ._gaussian_mixture import GaussianMixture
__all__ = ["BayesianGaussianMixture", "GaussianMixture"]
|
"""Mixture modeling algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._bayesian_mixture import BayesianGaussianMixture
from ._gaussian_mixture import GaussianMixture
__all__ = ["GaussianMixture", "BayesianGaussianMixture"]
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.mimetypes import POINT_CLOUD_EXTRA_EXTENSIONS
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return POINT_CLOUD_EXTRA_EXTENSIONS
def load(
self: T,
samples: int,
multiple_geometries: bool = False,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'PointsAndColors':
"""
Load the data from the url into an `NdArray` containing point cloud information.
---
```python
import numpy as np
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDoc):
point_cloud_url: PointCloud3DUrl
        doc = MyDoc(point_cloud_url="https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj")
# point_cloud = doc.point_cloud_url.load(samples=100)
# assert isinstance(point_cloud, np.ndarray)
# assert point_cloud.shape == (100, 3)
```
---
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:param skip_materials: Skip materials if True, else load.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
        :return: PointsAndColors object wrapping the sampled point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if not trimesh_args:
trimesh_args = {}
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(
force='scene', skip_materials=skip_materials, **trimesh_args
)
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh', **trimesh_args)
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(
self,
samples: int = 10000,
) -> None:
"""
Plot point cloud from url.
First, it loads the point cloud into a `PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
---
```python
import numpy as np
from docarray import BaseDoc
from docarray.documents import PointCloud3D
pc = PointCloud3D(url="https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj")
# option 1
# pc.url.display()
# option 2 (equivalent)
# pc.url.load(samples=10000).display()
```
---
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples, skip_materials=False).display()
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
samples: int,
multiple_geometries: bool = False,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'PointsAndColors':
"""
Load the data from the url into an `NdArray` containing point cloud information.
---
```python
import numpy as np
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDoc):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="thttps://people.sc.fsu.edu/~jburkardt/data/obj/al.obj")
# point_cloud = doc.point_cloud_url.load(samples=100)
# assert isinstance(point_cloud, np.ndarray)
# assert point_cloud.shape == (100, 3)
```
---
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:param skip_materials: Skip materials if True, else load.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: np.ndarray representing the point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if not trimesh_args:
trimesh_args = {}
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(
force='scene', skip_materials=skip_materials, **trimesh_args
)
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh', **trimesh_args)
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(
self,
samples: int = 10000,
) -> None:
"""
Plot point cloud from url.
First, it loads the point cloud into a `PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
---
```python
import numpy as np
from docarray import BaseDoc
from docarray.documents import PointCloud3D
pc = PointCloud3D(url="https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj")
# option 1
# pc.url.display()
# option 2 (equivalent)
# pc.url.load(samples=10000).display()
```
---
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples, skip_materials=False).display()
|
from . import utils
from .model import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
wav2vec2_xlsr_1b,
wav2vec2_xlsr_2b,
wav2vec2_xlsr_300m,
Wav2Vec2Model,
wavlm_base,
wavlm_large,
wavlm_model,
)
__all__ = [
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wavlm_model",
"wavlm_base",
"wavlm_large",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"utils",
"wav2vec2_xlsr_300m",
"wav2vec2_xlsr_1b",
"wav2vec2_xlsr_2b",
]
|
from . import utils
from .model import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
wavlm_base,
wavlm_large,
wavlm_model,
)
__all__ = [
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wavlm_model",
"wavlm_base",
"wavlm_large",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"utils",
]
|
"""Data struct for document summary index."""
from dataclasses import dataclass, field
from typing import Dict, List
from llama_index.core.data_structs.data_structs import IndexStruct
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.schema import BaseNode
@dataclass
class IndexDocumentSummary(IndexStruct):
"""
    A simple struct containing a mapping from summary node_id to doc node_ids,
    and the reverse mapping from doc node_id back to summary node_id.
"""
summary_id_to_node_ids: Dict[str, List[str]] = field(default_factory=dict)
node_id_to_summary_id: Dict[str, str] = field(default_factory=dict)
# track mapping from doc id to node summary id
doc_id_to_summary_id: Dict[str, str] = field(default_factory=dict)
def add_summary_and_nodes(
self,
summary_node: BaseNode,
nodes: List[BaseNode],
) -> str:
"""Add node and summary."""
summary_id = summary_node.node_id
ref_doc_id = summary_node.ref_doc_id
if ref_doc_id is None:
raise ValueError(
"ref_doc_id of node cannot be None when building a document "
"summary index"
)
self.doc_id_to_summary_id[ref_doc_id] = summary_id
for node in nodes:
node_id = node.node_id
if summary_id not in self.summary_id_to_node_ids:
self.summary_id_to_node_ids[summary_id] = []
self.summary_id_to_node_ids[summary_id].append(node_id)
self.node_id_to_summary_id[node_id] = summary_id
return summary_id
@property
def summary_ids(self) -> List[str]:
"""Get summary ids."""
return list(self.summary_id_to_node_ids.keys())
def delete(self, doc_id: str) -> None:
"""Delete a document and its nodes."""
summary_id = self.doc_id_to_summary_id[doc_id]
del self.doc_id_to_summary_id[doc_id]
node_ids = self.summary_id_to_node_ids[summary_id]
for node_id in node_ids:
del self.node_id_to_summary_id[node_id]
del self.summary_id_to_node_ids[summary_id]
def delete_nodes(self, node_ids: List[str]) -> None:
for node_id in node_ids:
summary_id = self.node_id_to_summary_id[node_id]
self.summary_id_to_node_ids[summary_id].remove(node_id)
del self.node_id_to_summary_id[node_id]
@classmethod
def get_type(cls) -> IndexStructType:
"""Get type."""
return IndexStructType.DOCUMENT_SUMMARY
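# --- Usage sketch (illustration only; not part of the module) ---
# How the struct above tracks summary <-> node mappings, assuming llama_index is
# installed so the imports at the top resolve. ``_StubNode`` is a hypothetical
# stand-in exposing only the attributes the struct reads.
from typing import Optional
@dataclass
class _StubNode:
    node_id: str
    ref_doc_id: Optional[str] = None
_struct = IndexDocumentSummary()
_summary_id = _struct.add_summary_and_nodes(
    _StubNode(node_id="summary-1", ref_doc_id="doc-1"),
    [_StubNode(node_id="chunk-1"), _StubNode(node_id="chunk-2")],
)
assert _struct.summary_ids == ["summary-1"]
assert _struct.node_id_to_summary_id["chunk-2"] == _summary_id
_struct.delete("doc-1")
assert _struct.summary_ids == []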
|
"""Data struct for document summary index."""
from dataclasses import dataclass, field
from typing import Dict, List
from llama_index.core.data_structs.data_structs import IndexStruct
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.schema import BaseNode
@dataclass
class IndexDocumentSummary(IndexStruct):
"""A simple struct containing a mapping from summary node_id to doc node_ids.
Also mapping vice versa.
"""
summary_id_to_node_ids: Dict[str, List[str]] = field(default_factory=dict)
node_id_to_summary_id: Dict[str, str] = field(default_factory=dict)
# track mapping from doc id to node summary id
doc_id_to_summary_id: Dict[str, str] = field(default_factory=dict)
def add_summary_and_nodes(
self,
summary_node: BaseNode,
nodes: List[BaseNode],
) -> str:
"""Add node and summary."""
summary_id = summary_node.node_id
ref_doc_id = summary_node.ref_doc_id
if ref_doc_id is None:
raise ValueError(
"ref_doc_id of node cannot be None when building a document "
"summary index"
)
self.doc_id_to_summary_id[ref_doc_id] = summary_id
for node in nodes:
node_id = node.node_id
if summary_id not in self.summary_id_to_node_ids:
self.summary_id_to_node_ids[summary_id] = []
self.summary_id_to_node_ids[summary_id].append(node_id)
self.node_id_to_summary_id[node_id] = summary_id
return summary_id
@property
def summary_ids(self) -> List[str]:
"""Get summary ids."""
return list(self.summary_id_to_node_ids.keys())
def delete(self, doc_id: str) -> None:
"""Delete a document and its nodes."""
summary_id = self.doc_id_to_summary_id[doc_id]
del self.doc_id_to_summary_id[doc_id]
node_ids = self.summary_id_to_node_ids[summary_id]
for node_id in node_ids:
del self.node_id_to_summary_id[node_id]
del self.summary_id_to_node_ids[summary_id]
def delete_nodes(self, node_ids: List[str]) -> None:
for node_id in node_ids:
summary_id = self.node_id_to_summary_id[node_id]
self.summary_id_to_node_ids[summary_id].remove(node_id)
del self.node_id_to_summary_id[node_id]
@classmethod
def get_type(cls) -> IndexStructType:
"""Get type."""
return IndexStructType.DOCUMENT_SUMMARY
|
from typing import Union, TypeVar, Any, TYPE_CHECKING, Type, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.document.base_node import BaseNode
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, BaseNode):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(
cls: Type[T], value: Union[T, Any], field: 'ModelField', config: 'BaseConfig'
) -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
else:
try:
                arr: np.ndarray = np.asarray(value)
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
def _to_nested_item_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should
be called when the Document is nested into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self.flush_ndarray(nd_proto, value=self)
        return NodeProto(tensor=nd_proto)
@classmethod
def read_ndarray(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def flush_ndarray(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
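# --- Standalone sketch (illustration only; not part of the module) ---
# The Tensor class above builds on numpy's subclass-via-view mechanism:
# ``value.view(cls)`` reinterprets an existing array as the subclass without
# copying the buffer. Nothing below touches docarray or protobuf.
class _MiniTensor(np.ndarray):
    @classmethod
    def from_ndarray(cls, value: np.ndarray) -> '_MiniTensor':
        return value.view(cls)
_arr = np.arange(6, dtype=np.float32).reshape(2, 3)
_t = _MiniTensor.from_ndarray(_arr)
assert isinstance(_t, _MiniTensor) and isinstance(_t, np.ndarray)
assert _t.base is _arr  # same underlying buffer, no copy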
|
from typing import Union, TypeVar, Any, TYPE_CHECKING, Type, cast
import numpy as np
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig, PydanticValueError
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, BaseNode):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(cls: Type[T], value: Union[T, Any], field: 'ModelField', config: 'BaseConfig') -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
else:
try:
                arr: np.ndarray = np.asarray(value)
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
def _to_nested_item_protobuf(self: T) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should be called when the Document
is nested into another Document that need to be converted into a protobuf
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self.flush_ndarray(nd_proto, value=self)
        return NodeProto(tensor=nd_proto)
@classmethod
def read_ndarray(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def flush_ndarray(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOG_PROCESSOR, LOOPS,
METRICS, MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS,
OPTIMIZERS, PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS,
TASK_UTILS, TRANSFORMS, VISBACKENDS, VISUALIZERS,
WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS',
'LOG_PROCESSOR', 'DefaultScope', 'traverse_registry_tree',
'count_registered_modules'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .default_scope import DefaultScope
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, LOOPS, METRICS,
MODEL_WRAPPERS, MODELS, OPTIMIZER_CONSTRUCTORS, OPTIMIZERS,
PARAM_SCHEDULERS, RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS,
TRANSFORMS, VISBACKENDS, VISUALIZERS, WEIGHT_INITIALIZERS)
from .utils import count_registered_modules, traverse_registry_tree
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS',
'METRICS', 'MODEL_WRAPPERS', 'LOOPS', 'VISBACKENDS', 'VISUALIZERS',
'DefaultScope', 'traverse_registry_tree', 'count_registered_modules'
]
|
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether to enable OpenMP
use_openmp: bool = True
# Whether to enable CUDA
use_cuda: bool = False
# Whether to enable NCCL
use_nccl: bool = False
# Whether to load nccl dynamically
use_dlopen_nccl: bool = False
# Whether to enable federated learning
plugin_federated: bool = False
# Whether to enable rmm support
plugin_rmm: bool = False
# Special option: See explanation below
use_system_libxgboost: bool = False
def _set_config_setting(self, config_settings: Dict[str, Any]) -> None:
for field_name in config_settings:
setattr(
self,
field_name,
(config_settings[field_name].lower() in ["true", "1", "on"]),
)
def update(self, config_settings: Optional[Dict[str, Any]]) -> None:
"""Parse config_settings from Pip (or other PEP 517 frontend)"""
if config_settings is not None:
self._set_config_setting(config_settings)
def get_cmake_args(self) -> List[str]:
"""Convert build configuration to CMake args"""
cmake_args = []
for field_name in [x.name for x in dataclasses.fields(self)]:
if field_name in ["use_system_libxgboost"]:
continue
cmake_option = field_name.upper()
cmake_value = "ON" if getattr(self, field_name) is True else "OFF"
cmake_args.append(f"-D{cmake_option}={cmake_value}")
return cmake_args
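# --- Usage sketch (illustration only; not part of the module) ---
# How a PEP 517 frontend's ``config_settings`` dict flows through ``update`` and
# ends up as CMake flags; the option values below are hypothetical.
_config = BuildConfiguration()
_config.update({"use_cuda": "True", "use_openmp": "0"})
assert _config.use_cuda is True and _config.use_openmp is False
print(_config.get_cmake_args())
# -> ['-DHIDE_CXX_SYMBOLS=ON', '-DUSE_OPENMP=OFF', '-DUSE_CUDA=ON',
#     '-DUSE_NCCL=OFF', '-DUSE_DLOPEN_NCCL=OFF', '-DPLUGIN_FEDERATED=OFF',
#     '-DPLUGIN_RMM=OFF']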
|
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether to enable OpenMP
use_openmp: bool = True
# Whether to enable CUDA
use_cuda: bool = False
# Whether to enable NCCL
use_nccl: bool = False
# Whether to load nccl dynamically
use_dlopen_nccl: bool = False
# Whether to enable federated learning
plugin_federated: bool = False
# Whether to enable rmm support
plugin_rmm: bool = False
# Special option: See explanation below
use_system_libxgboost: bool = False
def _set_config_setting(self, config_settings: Dict[str, Any]) -> None:
for field_name in config_settings:
setattr(
self,
field_name,
(config_settings[field_name].lower() in ["true", "1", "on"]),
)
def update(self, config_settings: Optional[Dict[str, Any]]) -> None:
"""Parse config_settings from Pip (or other PEP 517 frontend)"""
if config_settings is not None:
self._set_config_setting(config_settings)
def get_cmake_args(self) -> List[str]:
"""Convert build configuration to CMake args"""
cmake_args = []
for field_name in [x.name for x in dataclasses.fields(self)]:
if field_name in ["use_system_libxgboost"]:
continue
cmake_option = field_name.upper()
cmake_value = "ON" if getattr(self, field_name) is True else "OFF"
cmake_args.append(f"-D{cmake_option}={cmake_value}")
return cmake_args
|
from typing import Any, Dict, List, Optional, Union
from docarray.utils._internal.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(
data: Union[Dict, List] = {}, root_node: Optional[LookupTreeElem] = None
) -> Optional[LookupTreeElem]:
if isinstance(data, dict):
for key, value in data.items():
node: Optional[LookupTreeElem] = None
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet,'
f' please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}.{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, '
f'please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Union[Dict, List] = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: Any) -> bool:
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: Any) -> bool:
return self.evaluate(doc)
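# --- Usage sketch (illustration only; not part of the module) ---
# How a Mongo-style filter is parsed into a lookup tree (assumes docarray is
# installed so the lookup imports above resolve). Only the parsing step is
# shown; ``parser(doc)`` would then evaluate the tree against a docarray document.
_filter = {'$and': [{'text': {'$eq': 'hello'}}, {'price': {'$gte': 10}}]}
_parser = QueryParser(_filter)
assert isinstance(_parser.lookup_groups, LookupNode)  # '$and' becomes a node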
|
from typing import Any, Dict, List, Optional, Union
from docarray.utils._internal.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(
data: Union[Dict, List] = {}, root_node: Optional[LookupTreeElem] = None
) -> Optional[LookupTreeElem]:
if isinstance(data, dict):
for key, value in data.items():
node: Optional[LookupTreeElem] = None
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet,'
f' please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}.{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, '
f'please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Union[Dict, List] = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: Any) -> bool:
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: Any) -> bool:
return self.evaluate(doc)
|
import os
from deprecated import deprecated
from typing import Any, Optional
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
DEFAULT_API_BASE = "https://api.studio.nebius.ai/v1"
@deprecated(
reason="This class has been deprecated and will no longer be maintained. Please use llama-index-llms-nebius instead. See Multi Modal LLMs documentation for a complete guide on migration: https://docs.llamaindex.ai/en/stable/understanding/using_llms/using_llms/#multi-modal-llms",
version="0.4.1",
)
class NebiusMultiModal(OpenAIMultiModal):
"""
Nebius AI Studio Multimodal class.
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = DEFAULT_API_BASE,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("NEBIUS_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "nebius_multi_modal_llm"
def _get_model_kwargs(self, **kwargs: Any) -> dict[str, Any]:
base_kwargs = {"model": self.model, "temperature": self.temperature, **kwargs}
if self.max_new_tokens is not None:
base_kwargs["max_tokens"] = self.max_new_tokens
return {**base_kwargs, **self.additional_kwargs}
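# --- Construction sketch (illustration only; not part of the module) ---
# Assumes the llama-index OpenAI multi-modal package is installed and
# NEBIUS_API_KEY is exported; the model name is only an example, check Nebius AI
# Studio for the currently available vision models. Note the class above is
# deprecated in favour of llama-index-llms-nebius.
mm_llm = NebiusMultiModal(model="Qwen/Qwen2-VL-72B-Instruct")
print(mm_llm.class_name())  # -> "nebius_multi_modal_llm"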
|
import os
from typing import Any, Optional
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
DEFAULT_API_BASE = "https://api.studio.nebius.ai/v1"
class NebiusMultiModal(OpenAIMultiModal):
"""
Nebius AI Studio Multimodal class.
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = DEFAULT_API_BASE,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("NEBIUS_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "nebius_multi_modal_llm"
def _get_model_kwargs(self, **kwargs: Any) -> dict[str, Any]:
base_kwargs = {"model": self.model, "temperature": self.temperature, **kwargs}
if self.max_new_tokens is not None:
base_kwargs["max_tokens"] = self.max_new_tokens
return {**base_kwargs, **self.additional_kwargs}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparse_sigmoid
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import sparsemax
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.activations.activations import threshold
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch.utils.checkpoint import checkpoint
from mmdet.registry import MODELS
@MODELS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
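# --- Shape-check sketch (illustration only; not part of the module) ---
# Assumes torch, mmcv and mmdet are installed. The inputs mimic four HRNet
# branches at strides 4/8/16/32; the neck fuses them and emits ``num_outs`` levels.
if __name__ == '__main__':
    _neck = HRFPN(in_channels=[18, 36, 72, 144], out_channels=256, num_outs=5)
    _feats = [
        torch.rand(1, 18, 64, 64),
        torch.rand(1, 36, 32, 32),
        torch.rand(1, 72, 16, 16),
        torch.rand(1, 144, 8, 8),
    ]
    _outs = _neck(_feats)
    print([tuple(o.shape) for o in _outs])
    # -> [(1, 256, 64, 64), (1, 256, 32, 32), (1, 256, 16, 16),
    #     (1, 256, 8, 8), (1, 256, 4, 4)]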
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.utils.checkpoint import checkpoint
from mmdet.registry import MODELS
@MODELS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
samples: int,
multiple_geometries: bool = False,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'PointsAndColors':
"""
Load the data from the url into an NdArray containing point cloud information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDoc
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDoc):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:param skip_materials: Skip materials if True, else load.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: np.ndarray representing the point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if not trimesh_args:
trimesh_args = {}
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(
force='scene', skip_materials=skip_materials, **trimesh_args
)
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh', **trimesh_args)
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(
self,
samples: int = 10000,
) -> None:
"""
Plot point cloud from url.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
First, it loads the point cloud into a :class:`PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
.. code-block:: python
import numpy as np
from docarray import BaseDoc
from docarray.documents import PointCloud3D
pc = PointCloud3D("toydata/tetrahedron.obj")
# option 1
pc.url.display()
# option 2 (equivalent)
pc.url.load(samples=10000).display()
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples, skip_materials=False).display()
|
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T,
samples: int,
multiple_geometries: bool = False,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> 'PointsAndColors':
"""
Load the data from the url into an NdArray containing point cloud information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDocument):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:param skip_materials: Skip materials if True, else load.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: np.ndarray representing the point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if not trimesh_args:
trimesh_args = {}
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(
force='scene', skip_materials=skip_materials, **trimesh_args
)
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh', **trimesh_args)
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(
self,
samples: int = 10000,
) -> None:
"""
Plot point cloud from url.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
First, it loads the point cloud into a :class:`PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.documents import PointCloud3D
pc = PointCloud3D("toydata/tetrahedron.obj")
# option 1
pc.url.display()
# option 2 (equivalent)
pc.url.load(samples=10000).display()
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples, skip_materials=False).display()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
import warnings
from abc import abstractmethod
from typing import Iterable, Iterator, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getattr(self, '_subindices', None):
for selector, da in self._subindices.items():
value = DocumentArray(value)
if getattr(da, '_config', None) and da._config.root_id:
for v in value:
for doc in DocumentArray(v)[selector]:
doc.tags['_root_id_'] = v.id
docs_selector = value[selector]
if len(docs_selector) > 0:
da.extend(docs_selector)
def insert(self, index: int, value: 'Document', **kwargs):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
:param kwargs: Additional Arguments that are passed to the Document Store. This has no effect for in-memory DocumentArray.
"""
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._append(value, **kwargs)
self._update_subindices_append_extend(value)
def _append(self, value, **kwargs):
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
        return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
from docarray.helper import check_root_id
if self._is_subindex:
check_root_id(self, values)
self._extend(values, **kwargs)
self._update_subindices_append_extend(values)
def _extend(self, values, **kwargs):
for value in values:
self._append(value, **kwargs)
|
import warnings
from abc import abstractmethod
from typing import Iterable, Iterator, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getattr(self, '_subindices', None):
for selector, da in self._subindices.items():
value = DocumentArray(value)
if getattr(da, '_config', None) and da._config.root_id:
for v in value:
for doc in DocumentArray(v)[selector]:
doc.tags['_root_id_'] = v.id
docs_selector = value[selector]
if len(docs_selector) > 0:
da.extend(docs_selector)
def insert(self, index: int, value: 'Document', **kwargs):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
:param kwargs: Additional Arguments that are passed to the Document Store. This has no effect for in-memory DocumentArray.
"""
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._append(value, **kwargs)
self._update_subindices_append_extend(value)
def _append(self, value, **kwargs):
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
from docarray.helper import check_root_id
if self._is_subindex:
check_root_id(self, values)
self._extend(values, **kwargs)
self._update_subindices_append_extend(values)
def _extend(self, values, **kwargs):
for value in values:
self._append(value, **kwargs)
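# --- Usage sketch (illustration only; not part of the module) ---
# A hypothetical in-memory backend showing how the mixin's sequence methods sit
# on top of the storage hooks (assumes the classic docarray package providing
# ``Document`` is installed). Real backends also implement offset bookkeeping,
# configs and sub-index handling; dunders this sketch never calls simply raise.
class _StubStore(BaseSequenceLikeMixin):
    def __init__(self):
        self._docs = {}
        self._offset2ids = []
        self._is_subindex = False
    # storage hooks the mixin relies on
    def _set_doc_by_id(self, _id, value, **kwargs):
        self._docs[_id] = value
    def _get_doc_by_id(self, _id):
        return self._docs[_id]
    def _del_all_docs(self):
        self._docs.clear()
        self._offset2ids.clear()
    # remaining abstract / MutableSequence requirements, not exercised here
    def __eq__(self, other):
        return self is other
    def __contains__(self, _id):
        return _id in self._docs
    def __getitem__(self, index):
        raise NotImplementedError
    def __setitem__(self, index, value):
        raise NotImplementedError
    def __delitem__(self, index):
        raise NotImplementedError
if __name__ == '__main__':
    _store = _StubStore()
    _store.append(Document(text='hello'))
    _store.extend([Document(text='world')])
    print(len(_store), [d.text for d in _store])  # -> 2 ['hello', 'world']
    _store.clear()
    print(bool(_store))  # -> False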
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple, Union
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.dl_utils import tensor2imgs
DATA_BATCH = Optional[Union[dict, tuple, list]]
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Defaults to 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (dict or tuple or list, optional): Data from dataloader.
outputs (Sequence, optional): Outputs from model.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.structures import BaseDataElement
from mmengine.utils.dl_utils import tensor2imgs
# TODO: Due to interface changes, the current class
# functions incorrectly
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Defaults to 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
"""Unpad the input image.
Args:
input (np.ndarray): The image to unpad.
unpad_shape (tuple): The shape of image before padding.
Returns:
np.ndarray: The image before padding.
"""
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Sequence[BaseDataElement]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[dict], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataElement], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_inner_iters(batch_idx, self._interval):
for data, output in zip(data_batch, outputs): # type: ignore
input = data['inputs']
data_sample = data['data_sample']
input = tensor2imgs(input,
**data_sample.get('img_norm_cfg',
dict()))[0]
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.visualizer.add_datasample(name, origin_image,
data_sample, output,
self.draw_gt, self.draw_pred)
|
from jina import Flow, Document, DocumentArray
from ...flair_text import FlairTextEncoder
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
data = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = data[0].docs
for doc in docs:
assert doc.embedding.shape == (100,)
|
from jina import Flow, Document, DocumentArray
from jinahub.encoder.flair_text import FlairTextEncoder
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
data = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = data[0].docs
for doc in docs:
assert doc.embedding.shape == (100,)
|
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain_core.indexing.base import (
DeleteResponse,
DocumentIndex,
InMemoryRecordManager,
RecordManager,
UpsertResponse,
)
__all__ = [
"aindex",
"DeleteResponse",
"DocumentIndex",
"index",
"IndexingResult",
"InMemoryRecordManager",
"RecordManager",
"UpsertResponse",
]
_dynamic_imports = {
"aindex": "api",
"index": "api",
"IndexingResult": "api",
"DeleteResponse": "base",
"DocumentIndex": "base",
"InMemoryRecordManager": "base",
"RecordManager": "base",
"UpsertResponse": "base",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
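# --- Usage sketch (illustration only; not part of the module) ---
# The PEP 562 ``__getattr__`` above imports the backing submodule lazily on
# first attribute access and caches the symbol in ``globals()``. Assumes
# langchain-core is installed.
if __name__ == '__main__':
    from langchain_core import indexing
    _manager = indexing.InMemoryRecordManager(namespace="demo")
    print(type(_manager).__name__)  # -> InMemoryRecordManager
    print("InMemoryRecordManager" in vars(indexing))  # True after first access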
|
"""Code to help indexing data into a vectorstore.
This package contains helper logic to help deal with indexing data into
a vectorstore while avoiding duplicated content and over-writing content
if it's unchanged.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain_core.indexing.base import (
DeleteResponse,
DocumentIndex,
InMemoryRecordManager,
RecordManager,
UpsertResponse,
)
__all__ = [
"aindex",
"DeleteResponse",
"DocumentIndex",
"index",
"IndexingResult",
"InMemoryRecordManager",
"RecordManager",
"UpsertResponse",
]
_dynamic_imports = {
"aindex": "api",
"index": "api",
"IndexingResult": "api",
"DeleteResponse": "base",
"DocumentIndex": "base",
"InMemoryRecordManager": "base",
"RecordManager": "base",
"UpsertResponse": "base",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
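# The two indexing modules above use a PEP 562 module-level __getattr__ to defer
# importing langchain_core.indexing.api / .base until a name is first accessed.
# A minimal behavioural sketch (assumes langchain_core is installed; nothing
# beyond the names defined in the module above is used):
import langchain_core.indexing as indexing

index_fn = indexing.index                # first access triggers __getattr__,
                                         # which imports ".api" and caches the
                                         # resolved symbol in globals()
assert indexing.index is index_fn        # later accesses hit the cached name
assert "RecordManager" in dir(indexing)  # __dir__ advertises exactly __all__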
# Copyright (c) OpenMMLab. All rights reserved.
from .layer_decay_optimizer_constructor import \
LearningRateDecayOptimizerConstructor
__all__ = ['LearningRateDecayOptimizerConstructor']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import OPTIMIZER_BUILDERS, build_optimizer
from .layer_decay_optimizer_constructor import \
LearningRateDecayOptimizerConstructor
__all__ = [
'LearningRateDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS',
'build_optimizer'
]
|
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from docarray.array.mixins import ParallelMixin, GroupMixin
from docarray.helper import protocol_and_compress_from_file_path
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class DocumentArrayLoader(ParallelMixin, GroupMixin):
def __init__(
self,
path: Union[str, Path],
protocol: str = 'protobuf',
compress: Optional[str] = None,
show_progress: bool = False,
):
self._show_progress = show_progress
self._filename = path
self._protocol, self._compress = protocol_and_compress_from_file_path(
path, protocol, compress
)
with open(path, 'rb') as f:
version_numdocs_lendoc0 = f.read(9)
# 8 bytes (uint64)
self._len = int.from_bytes(
version_numdocs_lendoc0[1:9], 'big', signed=False
)
self._iter = iter(self)
def __iter__(self):
from docarray import Document
from docarray.array.mixins.io.pbar import get_progressbar
from rich import filesize
with open(self._filename, 'rb') as f:
f.read(9)
pbar, t = get_progressbar(
'Deserializing', disable=not self._show_progress, total=self._len
)
with pbar:
_total_size = 0
pbar.start_task(t)
for _ in range(self._len):
# 4 bytes (uint32)
len_current_doc_in_bytes = int.from_bytes(
f.read(4), 'big', signed=False
)
_total_size += len_current_doc_in_bytes
yield Document.from_bytes(
f.read(len_current_doc_in_bytes),
protocol=self._protocol,
compress=self._compress,
)
pbar.update(
t, advance=1, total_size=str(filesize.decimal(_total_size))
)
def __len__(self):
return self._len
def __getitem__(self, item: list) -> 'DocumentArray':
from docarray import DocumentArray
da = DocumentArray()
for _ in item:
da.append(next(self._iter))
return da
|
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from .. import ParallelMixin, GroupMixin
from ....helper import protocol_and_compress_from_file_path
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class DocumentArrayLoader(ParallelMixin, GroupMixin):
def __init__(
self,
path: Union[str, Path],
protocol: str = 'protobuf',
compress: Optional[str] = None,
show_progress: bool = False,
):
self._show_progress = show_progress
self._filename = path
self._protocol, self._compress = protocol_and_compress_from_file_path(
path, protocol, compress
)
with open(path, 'rb') as f:
version_numdocs_lendoc0 = f.read(9)
# 8 bytes (uint64)
self._len = int.from_bytes(
version_numdocs_lendoc0[1:9], 'big', signed=False
)
self._iter = iter(self)
def __iter__(self):
from docarray import Document
from ..io.pbar import get_progressbar
from rich import filesize
with open(self._filename, 'rb') as f:
f.read(9)
pbar, t = get_progressbar(
'Deserializing', disable=not self._show_progress, total=self._len
)
with pbar:
_total_size = 0
pbar.start_task(t)
for _ in range(self._len):
# 4 bytes (uint32)
len_current_doc_in_bytes = int.from_bytes(
f.read(4), 'big', signed=False
)
_total_size += len_current_doc_in_bytes
yield Document.from_bytes(
f.read(len_current_doc_in_bytes),
protocol=self._protocol,
compress=self._compress,
)
pbar.update(
t, advance=1, total_size=str(filesize.decimal(_total_size))
)
def __len__(self):
return self._len
def __getitem__(self, item: list) -> 'DocumentArray':
from docarray import DocumentArray
da = DocumentArray()
for _ in item:
da.append(next(self._iter))
return da
|
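# A hypothetical usage sketch of the DocumentArrayLoader defined above. The file
# name is illustrative and is assumed to have been written by
# DocumentArray.save_binary with a matching protocol/compression.
loader = DocumentArrayLoader('docs.protobuf', show_progress=True)
print(len(loader))         # document count taken from the 9-byte stream header

batch = loader[range(8)]   # __getitem__ consumes the next 8 docs from the stream
for doc in batch:
    print(doc.id)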
#!/usr/bin/env python3
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.
def process_tempita(fromfile, outfile=None):
"""Process tempita templated file and write out the result.
The template file is expected to end in `.c.tp` or `.pyx.tp`:
    E.g. processing `template.c.tp` generates `template.c`.
"""
with open(fromfile, "r", encoding="utf-8") as f:
template_content = f.read()
template = tempita.Template(template_content)
content = template.substitute()
with open(outfile, "w", encoding="utf-8") as f:
f.write(content)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infile", type=str, help="Path to the input file")
parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
parser.add_argument(
"-i",
"--ignore",
type=str,
help=(
"An ignored input - may be useful to add a "
"dependency between custom targets"
),
)
args = parser.parse_args()
if not args.infile.endswith(".tp"):
raise ValueError(f"Unexpected extension: {args.infile}")
if not args.outdir:
raise ValueError("Missing `--outdir` argument to tempita.py")
outdir_abs = os.path.join(os.getcwd(), args.outdir)
outfile = os.path.join(
outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0]
)
process_tempita(args.infile, outfile)
if __name__ == "__main__":
main()
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.
def process_tempita(fromfile, outfile=None):
"""Process tempita templated file and write out the result.
The template file is expected to end in `.c.tp` or `.pyx.tp`:
    E.g. processing `template.c.tp` generates `template.c`.
"""
with open(fromfile, "r", encoding="utf-8") as f:
template_content = f.read()
template = tempita.Template(template_content)
content = template.substitute()
with open(outfile, "w", encoding="utf-8") as f:
f.write(content)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infile", type=str, help="Path to the input file")
parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory")
parser.add_argument(
"-i",
"--ignore",
type=str,
help=(
"An ignored input - may be useful to add a "
"dependency between custom targets"
),
)
args = parser.parse_args()
if not args.infile.endswith(".tp"):
raise ValueError(f"Unexpected extension: {args.infile}")
if not args.outdir:
raise ValueError("Missing `--outdir` argument to tempita.py")
outdir_abs = os.path.join(os.getcwd(), args.outdir)
outfile = os.path.join(
outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0]
)
process_tempita(args.infile, outfile)
if __name__ == "__main__":
main()
|
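# A hypothetical invocation of the helper above; paths are illustrative only:
#
#   python tempita.py sklearn/utils/_seq_dataset.pyx.tp -o build/generated
#
# renders the Tempita template and writes build/generated/_seq_dataset.pyx.
# Calling the function directly behaves the same way:
process_tempita('sklearn/utils/_seq_dataset.pyx.tp',
                outfile='build/generated/_seq_dataset.pyx')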
import pytest
from backend.util.request import validate_url
def test_validate_url():
# Rejected IP ranges
with pytest.raises(ValueError):
validate_url("localhost", [])
with pytest.raises(ValueError):
validate_url("192.168.1.1", [])
with pytest.raises(ValueError):
validate_url("127.0.0.1", [])
with pytest.raises(ValueError):
validate_url("0.0.0.0", [])
# Normal URLs
assert validate_url("google.com/a?b=c", []) == "http://google.com/a?b=c"
assert validate_url("github.com?key=!@!@", []) == "http://github.com?key=!@!@"
# Scheme Enforcement
with pytest.raises(ValueError):
validate_url("ftp://example.com", [])
with pytest.raises(ValueError):
validate_url("file://example.com", [])
# International domain that converts to punycode - should be allowed if public
assert validate_url("http://xn--exmple-cua.com", []) == "http://xn--exmple-cua.com"
# If the domain fails IDNA encoding or is invalid, it should raise an error
with pytest.raises(ValueError):
validate_url("http://exa◌mple.com", [])
# IPv6 Addresses
with pytest.raises(ValueError):
validate_url("::1", []) # IPv6 loopback should be blocked
with pytest.raises(ValueError):
validate_url("http://[::1]", []) # IPv6 loopback in URL form
# Suspicious Characters in Hostname
with pytest.raises(ValueError):
validate_url("http://example_underscore.com", [])
with pytest.raises(ValueError):
validate_url("http://exa mple.com", []) # Space in hostname
# Malformed URLs
with pytest.raises(ValueError):
validate_url("http://", []) # No hostname
with pytest.raises(ValueError):
validate_url("://missing-scheme", []) # Missing proper scheme
# Trusted Origins
trusted = ["internal-api.company.com", "10.0.0.5"]
assert (
validate_url("internal-api.company.com", trusted)
== "http://internal-api.company.com"
)
assert validate_url("10.0.0.5", ["10.0.0.5"]) == "http://10.0.0.5"
# Special Characters in Path or Query
assert (
validate_url("example.com/path%20with%20spaces", [])
== "http://example.com/path%20with%20spaces"
)
# Backslashes should be replaced with forward slashes
assert (
validate_url("http://example.com\\backslash", [])
== "http://example.com/backslash"
)
# Check defaulting scheme behavior for valid domains
assert validate_url("example.com", []) == "http://example.com"
assert validate_url("https://secure.com", []) == "https://secure.com"
# Non-ASCII Characters in Query/Fragment
assert validate_url("example.com?param=äöü", []) == "http://example.com?param=äöü"
|
import pytest
from backend.util.request import validate_url
def test_validate_url():
with pytest.raises(ValueError):
validate_url("localhost", [])
with pytest.raises(ValueError):
validate_url("192.168.1.1", [])
with pytest.raises(ValueError):
validate_url("127.0.0.1", [])
with pytest.raises(ValueError):
validate_url("0.0.0.0", [])
validate_url("google.com", [])
validate_url("github.com", [])
validate_url("http://github.com", [])
|
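# A toy, self-contained sketch of the scheme and private-address checks the two
# test modules above exercise. This is NOT the backend.util.request
# implementation: IDNA handling, suspicious-character checks and DNS resolution,
# which the longer test also covers, are deliberately omitted.
import ipaddress
from urllib.parse import urlparse


def naive_validate_url(url: str, trusted_origins: list[str]) -> str:
    """Toy SSRF guard mirroring a subset of the behaviours tested above."""
    url = url.replace('\\', '/')               # normalise backslashes
    if '://' not in url:
        url = 'http://' + url                  # default scheme
    parsed = urlparse(url)
    if parsed.scheme not in ('http', 'https'):
        raise ValueError(f'blocked scheme: {parsed.scheme!r}')
    host = parsed.hostname
    if not host:
        raise ValueError('missing hostname')
    if host in trusted_origins:
        return url
    if host == 'localhost':
        raise ValueError('blocked hostname: localhost')
    try:
        ip = ipaddress.ip_address(host)
    except ValueError:
        ip = None                              # not a literal IP address
    if ip is not None and (ip.is_private or ip.is_loopback or ip.is_unspecified):
        raise ValueError(f'blocked address: {host}')
    return url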