input | output
---|---
#!/usr/bin/env python3
"""
This script should use a very simple, functional programming style.
Avoid Jinja macros in favor of native Python functions.
Don't go overboard on code generation; use Python only to generate
content that can't be easily declared statically using CircleCI's YAML API.
Data declarations (e.g. the nested loops for defining the configuration matrix)
should be at the top of the file for easy updating.
See this comment for design rationale:
https://github.com/pytorch/vision/pull/1321#issuecomment-531033978
"""
import os.path
import jinja2
import yaml
from jinja2 import select_autoescape
PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11"]
def build_download_job(filter_branch):
job = {
"name": "download_third_parties",
}
if filter_branch:
job["filters"] = gen_filter_branch_tree(filter_branch)
return [{"download_third_parties": job}]
def build_ffmpeg_job(os_type, filter_branch):
job = {
"name": f"build_ffmpeg_{os_type}",
"requires": ["download_third_parties"],
}
if filter_branch:
job["filters"] = gen_filter_branch_tree(filter_branch)
job["python_version"] = "foo"
return [{f"build_ffmpeg_{os_type}": job}]
def gen_filter_branch_tree(*branches):
return {
"branches": {
"only": list(branches),
},
"tags": {
# Using a raw string here to avoid having to escape
# anything
"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
},
}
def indent(indentation, data_list):
return ("\n" + " " * indentation).join(yaml.dump(data_list).splitlines())
def unittest_python_versions(os):
return {
"windows": PYTHON_VERSIONS[:1],
"macos": PYTHON_VERSIONS[:1],
"linux": PYTHON_VERSIONS,
}.get(os)
if __name__ == "__main__":
d = os.path.dirname(__file__)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(d),
lstrip_blocks=True,
autoescape=select_autoescape(enabled_extensions=("html", "xml")),
)
with open(os.path.join(d, "config.yml"), "w") as f:
f.write(env.get_template("config.yml.in").render())
f.write("\n")
|
#!/usr/bin/env python3
"""
This script should use a very simple, functional programming style.
Avoid Jinja macros in favor of native Python functions.
Don't go overboard on code generation; use Python only to generate
content that can't be easily declared statically using CircleCI's YAML API.
Data declarations (e.g. the nested loops for defining the configuration matrix)
should be at the top of the file for easy updating.
See this comment for design rationale:
https://github.com/pytorch/vision/pull/1321#issuecomment-531033978
"""
import os.path
import jinja2
import yaml
from jinja2 import select_autoescape
PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11"]
def build_download_job(filter_branch):
job = {
"name": "download_third_parties",
}
if filter_branch:
job["filters"] = gen_filter_branch_tree(filter_branch)
return [{"download_third_parties": job}]
def build_ffmpeg_job(os_type, filter_branch):
job = {
"name": f"build_ffmpeg_{os_type}",
"requires": ["download_third_parties"],
}
if filter_branch:
job["filters"] = gen_filter_branch_tree(filter_branch)
job["python_version"] = "foo"
return [{f"build_ffmpeg_{os_type}": job}]
def gen_filter_branch_tree(*branches):
return {
"branches": {
"only": list(branches),
},
"tags": {
# Using a raw string here to avoid having to escape
# anything
"only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/"
},
}
def indent(indentation, data_list):
return ("\n" + " " * indentation).join(yaml.dump(data_list).splitlines())
def unittest_python_versions(os):
return {
"windows": PYTHON_VERSIONS[:1],
"macos": PYTHON_VERSIONS[:1],
"linux": PYTHON_VERSIONS,
}.get(os)
def unittest_workflows(indentation=6):
jobs = []
jobs += build_download_job(None)
for os_type in ["linux", "macos"]:
for device_type in ["cpu"]:
for i, python_version in enumerate(unittest_python_versions(os_type)):
job = {
"name": f"unittest_{os_type}_{device_type}_py{python_version}",
"python_version": python_version,
"cuda_version": "cpu" if device_type == "cpu" else "cu118",
"requires": ["download_third_parties"],
}
jobs.append({f"unittest_{os_type}_{device_type}": job})
if i == 0 and os_type == "linux" and device_type == "cpu":
jobs.append(
{
"stylecheck": {
"name": f"stylecheck_py{python_version}",
"python_version": python_version,
"cuda_version": "cpu",
}
}
)
return indent(indentation, jobs)
if __name__ == "__main__":
d = os.path.dirname(__file__)
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(d),
lstrip_blocks=True,
autoescape=select_autoescape(enabled_extensions=("html", "xml")),
)
with open(os.path.join(d, "config.yml"), "w") as f:
f.write(
env.get_template("config.yml.in").render(
unittest_workflows=unittest_workflows,
)
)
f.write("\n")
|
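A minimal usage sketch (not part of the original script; it simply reuses the helpers defined above and assumes PyYAML is installed) showing how workflow entries are composed and how `indent` re-indents the dumped YAML so it can be spliced into the Jinja template:

jobs = []
jobs += build_download_job("main")       # branch-filtered download job
jobs += build_ffmpeg_job("linux", None)  # unfiltered ffmpeg build job

# indent(6, ...) shifts every dumped YAML line by six spaces so the block
# can be inserted under the `workflows:` key of config.yml.in.
print(indent(6, jobs))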
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Optional
from mmengine.structures import InstanceData
class BaseAssigner(metaclass=ABCMeta):
"""Base assigner that assigns boxes to ground truth boxes."""
@abstractmethod
def assign(self,
pred_instances: InstanceData,
gt_instances: InstanceData,
gt_instances_ignore: Optional[InstanceData] = None,
**kwargs):
"""Assign boxes to either a ground truth boxes or a negative boxes."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Optional
from mmengine.data import InstanceData
class BaseAssigner(metaclass=ABCMeta):
"""Base assigner that assigns boxes to ground truth boxes."""
@abstractmethod
def assign(self,
pred_instances: InstanceData,
gt_instances: InstanceData,
gt_instances_ignore: Optional[InstanceData] = None,
**kwargs):
"""Assign boxes to either a ground truth boxes or a negative boxes."""
|
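A hypothetical concrete assigner (illustration only; `DummyAssigner` and its trivial rule are not from the source) showing how the abstract `assign` hook is meant to be overridden:

class DummyAssigner(BaseAssigner):
    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               gt_instances_ignore: Optional[InstanceData] = None,
               **kwargs):
        # A real assigner returns an assignment result mapping every predicted
        # box to a ground-truth index or to the background (negative) class.
        return [0 for _ in range(len(pred_instances))]  # placeholder only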
_base_ = ['./ld_r18-gflv1-r101_fpn_1x_coco.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101-dconv-c3-c5_fpn_ms-2x_coco.py',
teacher_ckpt=teacher_ckpt,
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = ['./ld_r18_gflv1_r101_fpn_coco_1x.py']
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth' # noqa
model = dict(
teacher_config='configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py',
teacher_ckpt=teacher_ckpt,
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5))
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
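A tiny arithmetic sketch (not from the config) of how the `MultiStepLR` entry above scales the base learning rate over the 24 training epochs: the multiplier drops by `gamma` at each milestone epoch.

def lr_multiplier(epoch, milestones=(16, 22), gamma=0.1):
    # The number of milestones already passed determines the decay power.
    return gamma ** sum(epoch >= m for m in milestones)

assert lr_multiplier(0) == 1.0                 # epochs 0-15: base LR
assert lr_multiplier(16) == 0.1                # epochs 16-21: base LR * 0.1
assert abs(lr_multiplier(22) - 0.01) < 1e-12   # epochs 22-23: base LR * 0.01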
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
AutoProcessor,
LlavaOnevisionImageProcessor,
LlavaOnevisionProcessor,
LlavaOnevisionVideoProcessor,
Qwen2TokenizerFast,
)
if is_torch_available():
pass
@require_vision
class LlavaOnevisionProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaOnevisionProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaOnevisionImageProcessor()
video_processor = LlavaOnevisionVideoProcessor()
tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
tokenizer.add_special_tokens({"additional_special_tokens": ["<image>", "<video>"]})
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaOnevisionProcessor(
video_processor=video_processor, image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs
)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
cls.video_token = processor.video_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_video_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"num_image_tokens": 6,
"vision_feature_select_strategy": "default"
} # fmt: skip
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import shutil
import tempfile
import unittest
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import (
AutoProcessor,
LlavaOnevisionImageProcessor,
LlavaOnevisionProcessor,
LlavaOnevisionVideoProcessor,
Qwen2TokenizerFast,
)
if is_torch_available():
pass
@require_vision
class LlavaOnevisionProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = LlavaOnevisionProcessor
@classmethod
def setUpClass(cls):
cls.tmpdirname = tempfile.mkdtemp()
image_processor = LlavaOnevisionImageProcessor()
video_processor = LlavaOnevisionVideoProcessor()
tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
processor_kwargs = cls.prepare_processor_dict()
processor = LlavaOnevisionProcessor(
video_processor=video_processor, image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs
)
processor.save_pretrained(cls.tmpdirname)
cls.image_token = processor.image_token
cls.video_token = processor.video_token
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def get_video_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdirname, ignore_errors=True)
@staticmethod
def prepare_processor_dict():
return {
"chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + ' '}}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>' }}{% endfor %}{# Render all video then #}{% for content in message['content'] | selectattr('type', 'equalto', 'video') %}{{ '<video>' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ '\n' + content['text'] }}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ '\n' + content['text'] }}{% endgeneration %}{% endfor %}{% endif %}{{'<|im_end|>'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"num_image_tokens": 6,
"vision_feature_select_strategy": "default"
} # fmt: skip
# Copied from tests.models.llava.test_processor_llava.LlavaProcessorTest.test_chat_template_is_saved
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded.keys())
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "usps"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class USPS(Dataset):
"""USPS Dataset
homepage="https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass"
_RESOURCES = {
"train": HttpResource(
f"{_URL}/usps.bz2", sha256="3771e9dd6ba685185f89867b6e249233dd74652389f263963b3b741e994b034f"
),
"test": HttpResource(
f"{_URL}/usps.t.bz2", sha256="a9c0164e797d60142a50604917f0baa604f326e9a689698763793fa5d12ffc4e"
),
}
def _resources(self) -> List[OnlineResource]:
return [USPS._RESOURCES[self._split]]
def _prepare_sample(self, line: str) -> Dict[str, Any]:
label, *values = line.strip().split(" ")
values = [float(value.split(":")[1]) for value in values]
pixels = torch.tensor(values).add_(1).div_(2)
return dict(
image=Image(pixels.reshape(16, 16)),
label=Label(int(label) - 1, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = Decompressor(resource_dps[0])
dp = LineReader(dp, decode=True, return_path=False)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 7_291 if self._split == "train" else 2_007
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "usps"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class USPS(Dataset):
"""USPS Dataset
homepage="https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass"
_RESOURCES = {
"train": HttpResource(
f"{_URL}/usps.bz2", sha256="3771e9dd6ba685185f89867b6e249233dd74652389f263963b3b741e994b034f"
),
"test": HttpResource(
f"{_URL}/usps.t.bz2", sha256="a9c0164e797d60142a50604917f0baa604f326e9a689698763793fa5d12ffc4e"
),
}
def _resources(self) -> List[OnlineResource]:
return [USPS._RESOURCES[self._split]]
def _prepare_sample(self, line: str) -> Dict[str, Any]:
label, *values = line.strip().split(" ")
values = [float(value.split(":")[1]) for value in values]
pixels = torch.tensor(values).add_(1).div_(2)
return dict(
image=Image(pixels.reshape(16, 16)),
label=Label(int(label) - 1, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = Decompressor(resource_dps[0])
dp = LineReader(dp, decode=True, return_path=False)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 7_291 if self._split == "train" else 2_007
|
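A standalone sketch (synthetic four-pixel line, illustration only) of the libsvm-style parsing performed by `_prepare_sample` above: each record is `<label> <index>:<value> ...`, and pixel values are rescaled from [-1, 1] to [0, 1].

import torch

line = "7 1:-1.0 2:-0.5 3:0.0 4:1.0"   # hypothetical shortened USPS record
label, *values = line.strip().split(" ")
pixels = torch.tensor([float(v.split(":")[1]) for v in values]).add_(1).div_(2)
print(int(label) - 1)   # 6 -- labels are 1-based in the file, 0-based in the dataset
print(pixels)           # tensor([0.0000, 0.2500, 0.5000, 1.0000])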
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import logging
import os
import shutil
from typing import Callable, List, Tuple
import torch
from torch import Tensor
def save_checkpoint(state, is_best, filename):
r"""Save the model to a temporary file first, then copy it to filename,
in case signals interrupt the torch.save() process.
"""
torch.save(state, filename)
logging.info(f"Checkpoint saved to {filename}")
if is_best:
path, best_filename = os.path.split(filename)
best_filename = os.path.join(path, "best_" + best_filename)
shutil.copyfile(filename, best_filename)
logging.info(f"Current best checkpoint saved to {best_filename}")
def pad_sequences(batch: List[Tensor]) -> Tuple[Tensor, Tensor]:
r"""Right zero-pad all one-hot text sequences to max input length.
Modified from https://github.com/NVIDIA/DeepLearningExamples.
"""
input_lengths, ids_sorted_decreasing = torch.sort(torch.LongTensor([len(x) for x in batch]), dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]]
text_padded[i, : text.size(0)] = text
return text_padded, input_lengths
def prepare_input_sequence(texts: List[str], text_processor: Callable[[str], List[int]]) -> Tuple[Tensor, Tensor]:
d = []
for text in texts:
d.append(torch.IntTensor(text_processor(text)[:]))
text_padded, input_lengths = pad_sequences(d)
return text_padded, input_lengths
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import logging
import os
import shutil
from typing import List, Tuple, Callable
import torch
from torch import Tensor
def save_checkpoint(state, is_best, filename):
r"""Save the model to a temporary file first, then copy it to filename,
in case signals interrupt the torch.save() process.
"""
torch.save(state, filename)
logging.info(f"Checkpoint saved to {filename}")
if is_best:
path, best_filename = os.path.split(filename)
best_filename = os.path.join(path, "best_" + best_filename)
shutil.copyfile(filename, best_filename)
logging.info(f"Current best checkpoint saved to {best_filename}")
def pad_sequences(batch: List[Tensor]) -> Tuple[Tensor, Tensor]:
r"""Right zero-pad all one-hot text sequences to max input length.
Modified from https://github.com/NVIDIA/DeepLearningExamples.
"""
input_lengths, ids_sorted_decreasing = torch.sort(torch.LongTensor([len(x) for x in batch]), dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]]
text_padded[i, : text.size(0)] = text
return text_padded, input_lengths
def prepare_input_sequence(texts: List[str], text_processor: Callable[[str], List[int]]) -> Tuple[Tensor, Tensor]:
d = []
for text in texts:
d.append(torch.IntTensor(text_processor(text)[:]))
text_padded, input_lengths = pad_sequences(d)
return text_padded, input_lengths
|
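A short usage sketch (inputs invented) for `pad_sequences` above: sequences are sorted by descending length and right-padded with zeros to the longest length in the batch.

import torch

batch = [torch.LongTensor([1, 2, 3]), torch.LongTensor([4, 5])]
text_padded, input_lengths = pad_sequences(batch)
print(text_padded)    # tensor([[1, 2, 3],
                      #         [4, 5, 0]])
print(input_lengths)  # tensor([3, 2])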
import os
import numpy as np
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare_to(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare_to(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.resave_weights(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
|
import os
import numpy as np
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare_to(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare_to(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare_to(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.resave_weights(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare_to(target_model) # Succeeds
self.assertEqual(out["status"], "success")
|
"""Query Rewriting Retriever Pack."""
from typing import Any, Dict, List
from llama_index.core import Settings
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.schema import TextNode
class QueryRewritingRetrieverPack(BaseLlamaPack):
"""
Query rewriting retriever pack.
Given input nodes, build a vector index.
Then rewrite the query into multiple queries and
rerank the results.
"""
def __init__(
self,
nodes: List[TextNode] = None,
chunk_size: int = 256,
mode: str = "reciprocal_rerank",
vector_similarity_top_k: int = 2,
fusion_similarity_top_k: int = 2,
num_queries: int = 4,
**kwargs: Any,
) -> None:
"""Init params."""
Settings.chunk_size = chunk_size
index = VectorStoreIndex(nodes)
self.vector_retriever = index.as_retriever(
similarity_top_k=vector_similarity_top_k
)
self.fusion_retriever = QueryFusionRetriever(
[self.vector_retriever],
similarity_top_k=fusion_similarity_top_k,
num_queries=num_queries, # set this to 1 to disable query generation
mode=mode,
use_async=True,
verbose=True,
# query_gen_prompt="...", # we could override the query generation prompt here
)
self.query_engine = RetrieverQueryEngine.from_args(self.fusion_retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_retriever": self.vector_retriever,
"fusion_retriever": self.fusion_retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.fusion_retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
"""Query Rewriting Retriever Pack."""
from typing import Any, Dict, List
from llama_index.core import Settings
from llama_index.core.indices.vector_store import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.core.schema import TextNode
class QueryRewritingRetrieverPack(BaseLlamaPack):
"""Query rewriting retriever pack.
Given input nodes, build a vector index.
Then rewrite the query into multiple queries and
rerank the results.
"""
def __init__(
self,
nodes: List[TextNode] = None,
chunk_size: int = 256,
mode: str = "reciprocal_rerank",
vector_similarity_top_k: int = 2,
fusion_similarity_top_k: int = 2,
num_queries: int = 4,
**kwargs: Any,
) -> None:
"""Init params."""
Settings.chunk_size = chunk_size
index = VectorStoreIndex(nodes)
self.vector_retriever = index.as_retriever(
similarity_top_k=vector_similarity_top_k
)
self.fusion_retriever = QueryFusionRetriever(
[self.vector_retriever],
similarity_top_k=fusion_similarity_top_k,
num_queries=num_queries, # set this to 1 to disable query generation
mode=mode,
use_async=True,
verbose=True,
# query_gen_prompt="...", # we could override the query generation prompt here
)
self.query_engine = RetrieverQueryEngine.from_args(self.fusion_retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_retriever": self.vector_retriever,
"fusion_retriever": self.fusion_retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.fusion_retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
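A hedged usage sketch for the pack above (it assumes an LLM and embedding model are already configured via `Settings`; the node text and query are placeholders):

nodes = [TextNode(text="The Eiffel Tower is located in Paris.")]
pack = QueryRewritingRetrieverPack(nodes=nodes, num_queries=3)
retrieved_nodes = pack.retrieve("Where is the Eiffel Tower?")  # fused retrieval only
response = pack.run("Where is the Eiffel Tower?")              # full query engine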
import io
import warnings
from abc import ABC
import numpy as np
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
:param format: the image format used to store the image, e.g. 'PNG', 'JPG', ...
:return: bytes
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
def save(self, file_path: str) -> None:
"""
Save image tensor to an image file.
:param file_path: path to an image file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
comp_backend = self.get_comp_backend()
np_img = comp_backend.to_numpy(self).astype(np.uint8)
pil_img = PILImage.fromarray(np_img)
pil_img.save(file_path)
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import io
import warnings
from abc import ABC
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library, is_notebook
class AbstractImageTensor(AbstractTensor, ABC):
def to_bytes(self, format: str = 'PNG') -> bytes:
"""
Convert image tensor to bytes.
:param format: the image format used to store the image, e.g. 'PNG', 'JPG', ...
:return: bytes
"""
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
if format == 'jpg':
format = 'jpeg' # unify it to ISO standard
tensor = self.get_comp_backend().to_numpy(self)
mode = 'RGB' if tensor.ndim == 3 else 'L'
pil_image = PILImage.fromarray(tensor, mode=mode)
with io.BytesIO() as buffer:
pil_image.save(buffer, format=format)
img_byte_arr = buffer.getvalue()
return img_byte_arr
def display(self) -> None:
"""
Display image data from tensor in notebook.
"""
if is_notebook():
PIL = import_library('PIL', raise_error=True) # noqa: F841
from PIL import Image as PILImage
np_array = self.get_comp_backend().to_numpy(self)
img = PILImage.fromarray(np_array)
from IPython.display import display
display(img)
else:
warnings.warn('Display of image is only possible in a notebook.')
|
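An illustrative sketch (plain NumPy and Pillow, not tied to a concrete tensor backend) of the PIL round trip that `to_bytes` performs above:

import io

import numpy as np
from PIL import Image as PILImage

rgb = np.zeros((16, 16, 3), dtype=np.uint8)   # dummy RGB image array
with io.BytesIO() as buffer:
    PILImage.fromarray(rgb, mode='RGB').save(buffer, format='PNG')
    png_bytes = buffer.getvalue()
print(png_bytes[:8])  # PNG magic bytes: b'\x89PNG\r\n\x1a\n'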
from torchaudio._internal import module_utils as _mod_utils
from . import sox_utils
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
__all__ = [
"download_asset",
"sox_utils",
]
|
from torchaudio._internal import module_utils as _mod_utils
from . import (
sox_utils,
)
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
__all__ = [
"download_asset",
"sox_utils",
]
|
from .transform_encoder import TransformerTorchEncoder
|
from .transform_encoder import TransformerTorchEncoder
|
from typing import Optional, TYPE_CHECKING
import numpy as np
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _is_datauri
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class ConvertMixin:
"""Provide helper functions for :class:`Document` to support conversion between :attr:`.tensor`, :attr:`.text`
and :attr:`.blob`."""
def convert_blob_to_tensor(
self: 'T', dtype: Optional[str] = None, count: int = -1, offset: int = 0
) -> 'T':
"""Assuming the :attr:`blob` is a _valid_ buffer of Numpy ndarray,
set :attr:`tensor` accordingly.
:param dtype: Data-type of the returned array; default: float.
:param count: Number of items to read. ``-1`` means all data in the buffer.
:param offset: Start reading the buffer from this offset (in bytes); default: 0.
:return: itself after processed
"""
self.tensor = np.frombuffer(self.blob, dtype=dtype, count=count, offset=offset)
return self
def convert_tensor_to_blob(self: 'T') -> 'T':
"""Convert :attr:`.tensor` to :attr:`.blob` inplace.
:return: itself after processed
"""
self.blob = self.tensor.tobytes()
return self
def convert_uri_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.uri` to dataURI and store it in :attr:`.uri` inplace.
:param charset: charset may be any character set registered with IANA
:param base64: if True, encode the data with base64, which maps arbitrary octet sequences into a 7-bit-safe form; efficient for binary and non-text 8-bit data, and sometimes used for text that frequently contains non-US-ASCII characters.
:return: itself after processed
"""
if not _is_datauri(self.uri):
blob = _uri_to_blob(self.uri)
self.uri = _to_datauri(self.mime_type, blob, charset, base64, binary=True)
return self
|
from typing import Optional, TYPE_CHECKING
import numpy as np
from docarray.document.mixins.helper import _uri_to_blob, _to_datauri, _is_datauri
if TYPE_CHECKING:
from docarray.typing import T
class ConvertMixin:
"""Provide helper functions for :class:`Document` to support conversion between :attr:`.tensor`, :attr:`.text`
and :attr:`.blob`."""
def convert_blob_to_tensor(
self: 'T', dtype: Optional[str] = None, count: int = -1, offset: int = 0
) -> 'T':
"""Assuming the :attr:`blob` is a _valid_ buffer of Numpy ndarray,
set :attr:`tensor` accordingly.
:param dtype: Data-type of the returned array; default: float.
:param count: Number of items to read. ``-1`` means all data in the buffer.
:param offset: Start reading the buffer from this offset (in bytes); default: 0.
:return: itself after processed
"""
self.tensor = np.frombuffer(self.blob, dtype=dtype, count=count, offset=offset)
return self
def convert_tensor_to_blob(self: 'T') -> 'T':
"""Convert :attr:`.tensor` to :attr:`.blob` inplace.
:return: itself after processed
"""
self.blob = self.tensor.tobytes()
return self
def convert_uri_to_datauri(
self: 'T', charset: str = 'utf-8', base64: bool = False
) -> 'T':
"""Convert :attr:`.uri` to dataURI and store it in :attr:`.uri` inplace.
:param charset: charset may be any character set registered with IANA
:param base64: if True, encode the data with base64, which maps arbitrary octet sequences into a 7-bit-safe form; efficient for binary and non-text 8-bit data, and sometimes used for text that frequently contains non-US-ASCII characters.
:return: itself after processed
"""
if not _is_datauri(self.uri):
blob = _uri_to_blob(self.uri)
self.uri = _to_datauri(self.mime_type, blob, charset, base64, binary=True)
return self
|
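A standalone NumPy sketch (not from the mixin) of the bytes/tensor round trip that `convert_tensor_to_blob` and `convert_blob_to_tensor` rely on:

import numpy as np

tensor = np.arange(4, dtype=np.float32)
blob = tensor.tobytes()                           # tensor -> raw buffer
restored = np.frombuffer(blob, dtype=np.float32)  # buffer -> tensor
assert np.array_equal(tensor, restored)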
# Copyright (c) OpenMMLab. All rights reserved.
from enum import Enum
from typing import Union
class Priority(Enum):
"""Hook priority levels.
+--------------+------------+
| Level | Value |
+==============+============+
| HIGHEST | 0 |
+--------------+------------+
| VERY_HIGH | 10 |
+--------------+------------+
| HIGH | 30 |
+--------------+------------+
| ABOVE_NORMAL | 40 |
+--------------+------------+
| NORMAL | 50 |
+--------------+------------+
| BELOW_NORMAL | 60 |
+--------------+------------+
| LOW | 70 |
+--------------+------------+
| VERY_LOW | 90 |
+--------------+------------+
| LOWEST | 100 |
+--------------+------------+
"""
HIGHEST = 0
VERY_HIGH = 10
HIGH = 30
ABOVE_NORMAL = 40
NORMAL = 50
BELOW_NORMAL = 60
LOW = 70
VERY_LOW = 90
LOWEST = 100
def get_priority(priority: Union[int, str, Priority]) -> int:
"""Get priority value.
Args:
priority (int or str or :obj:`Priority`): Priority.
Returns:
int: The priority value.
"""
if isinstance(priority, int):
if priority < 0 or priority > 100:
raise ValueError('priority must be between 0 and 100')
return priority
elif isinstance(priority, Priority):
return priority.value
elif isinstance(priority, str):
return Priority[priority.upper()].value
else:
raise TypeError('priority must be an integer or Priority enum value')
|
# Copyright (c) OpenMMLab. All rights reserved.
from enum import Enum
from typing import Union
class Priority(Enum):
"""Hook priority levels.
+--------------+------------+
| Level | Value |
+==============+============+
| HIGHEST | 0 |
+--------------+------------+
| VERY_HIGH | 10 |
+--------------+------------+
| HIGH | 30 |
+--------------+------------+
| ABOVE_NORMAL | 40 |
+--------------+------------+
| NORMAL | 50 |
+--------------+------------+
| BELOW_NORMAL | 60 |
+--------------+------------+
| LOW | 70 |
+--------------+------------+
| VERY_LOW | 90 |
+--------------+------------+
| LOWEST | 100 |
+--------------+------------+
"""
HIGHEST = 0
VERY_HIGH = 10
HIGH = 30
ABOVE_NORMAL = 40
NORMAL = 50
BELOW_NORMAL = 60
LOW = 70
VERY_LOW = 90
LOWEST = 100
def get_priority(priority: Union[int, str, Priority]) -> int:
"""Get priority value.
Args:
priority (int or str or :obj:`Priority`): Priority.
Returns:
int: The priority value.
"""
if isinstance(priority, int):
if priority < 0 or priority > 100:
raise ValueError('priority must be between 0 and 100')
return priority
elif isinstance(priority, Priority):
return priority.value
elif isinstance(priority, str):
return Priority[priority.upper()].value
else:
raise TypeError('priority must be an integer or Priority enum value')
|
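A quick usage sketch for `get_priority` above: integers pass through after range checking, while enum members and case-insensitive names resolve to their numeric value.

assert get_priority(42) == 42
assert get_priority(Priority.NORMAL) == 50
assert get_priority('below_normal') == 60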
"""Evaluator."""
from abc import abstractmethod
from typing import Any, Optional, Sequence
from llama_index.core.async_utils import asyncio_run
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType
class EvaluationResult(BaseModel):
"""
Evaluation result.
Output of a BaseEvaluator.
"""
query: Optional[str] = Field(default=None, description="Query string")
contexts: Optional[Sequence[str]] = Field(
default=None, description="Context strings"
)
response: Optional[str] = Field(default=None, description="Response string")
passing: Optional[bool] = Field(
default=None, description="Binary evaluation result (passing or not)"
)
feedback: Optional[str] = Field(
default=None, description="Feedback or reasoning for the response"
)
score: Optional[float] = Field(default=None, description="Score for the response")
pairwise_source: Optional[str] = Field(
default=None,
description=(
"Used only for pairwise and specifies whether it is from original order of"
" presented answers or flipped order"
),
)
invalid_result: bool = Field(
default=False, description="Whether the evaluation result is an invalid one."
)
invalid_reason: Optional[str] = Field(
default=None, description="Reason for invalid evaluation."
)
class BaseEvaluator(PromptMixin):
"""Base Evaluator class."""
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def evaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
return asyncio_run(
self.aevaluate(
query=query,
response=response,
contexts=contexts,
**kwargs,
)
)
@abstractmethod
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
raise NotImplementedError
def evaluate_response(
self,
query: Optional[str] = None,
response: Optional[Response] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string and generated Response object.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
response_str: Optional[str] = None
contexts: Optional[Sequence[str]] = None
if response is not None:
response_str = response.response
contexts = [node.get_content() for node in response.source_nodes]
return self.evaluate(
query=query, response=response_str, contexts=contexts, **kwargs
)
async def aevaluate_response(
self,
query: Optional[str] = None,
response: Optional[Response] = None,
**kwargs: Any,
) -> EvaluationResult:
"""
Run evaluation with query string and generated Response object.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
response_str: Optional[str] = None
contexts: Optional[Sequence[str]] = None
if response is not None:
response_str = response.response
contexts = [node.get_content() for node in response.source_nodes]
return await self.aevaluate(
query=query, response=response_str, contexts=contexts, **kwargs
)
# legacy: backward compatibility
Evaluation = EvaluationResult
|
"""Evaluator."""
from abc import abstractmethod
from typing import Any, Optional, Sequence
from llama_index.core.async_utils import asyncio_run
from llama_index.core.base.response.schema import Response
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType
class EvaluationResult(BaseModel):
"""Evaluation result.
Output of a BaseEvaluator.
"""
query: Optional[str] = Field(default=None, description="Query string")
contexts: Optional[Sequence[str]] = Field(
default=None, description="Context strings"
)
response: Optional[str] = Field(default=None, description="Response string")
passing: Optional[bool] = Field(
default=None, description="Binary evaluation result (passing or not)"
)
feedback: Optional[str] = Field(
default=None, description="Feedback or reasoning for the response"
)
score: Optional[float] = Field(default=None, description="Score for the response")
pairwise_source: Optional[str] = Field(
default=None,
description=(
"Used only for pairwise and specifies whether it is from original order of"
" presented answers or flipped order"
),
)
invalid_result: bool = Field(
default=False, description="Whether the evaluation result is an invalid one."
)
invalid_reason: Optional[str] = Field(
default=None, description="Reason for invalid evaluation."
)
class BaseEvaluator(PromptMixin):
"""Base Evaluator class."""
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def evaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
return asyncio_run(
self.aevaluate(
query=query,
response=response,
contexts=contexts,
**kwargs,
)
)
@abstractmethod
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Run evaluation with query string, retrieved contexts,
and generated response string.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
raise NotImplementedError
def evaluate_response(
self,
query: Optional[str] = None,
response: Optional[Response] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Run evaluation with query string and generated Response object.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
response_str: Optional[str] = None
contexts: Optional[Sequence[str]] = None
if response is not None:
response_str = response.response
contexts = [node.get_content() for node in response.source_nodes]
return self.evaluate(
query=query, response=response_str, contexts=contexts, **kwargs
)
async def aevaluate_response(
self,
query: Optional[str] = None,
response: Optional[Response] = None,
**kwargs: Any,
) -> EvaluationResult:
"""Run evaluation with query string and generated Response object.
Subclasses can override this method to provide custom evaluation logic and
take in additional arguments.
"""
response_str: Optional[str] = None
contexts: Optional[Sequence[str]] = None
if response is not None:
response_str = response.response
contexts = [node.get_content() for node in response.source_nodes]
return await self.aevaluate(
query=query, response=response_str, contexts=contexts, **kwargs
)
# legacy: backward compatibility
Evaluation = EvaluationResult
|
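A hypothetical minimal evaluator (illustration only; the keyword rule and class name are invented) showing what a concrete `BaseEvaluator` subclass needs to implement: the async `aevaluate` plus the `PromptMixin` prompt hooks.

class KeywordEvaluator(BaseEvaluator):
    def __init__(self, keyword: str) -> None:
        self._keyword = keyword

    def _get_prompts(self):
        return {}  # no prompts to expose for this toy evaluator

    def _update_prompts(self, prompts) -> None:
        pass

    async def aevaluate(self, query=None, response=None, contexts=None, **kwargs):
        # Pass if the expected keyword appears anywhere in the response string.
        passing = bool(response) and self._keyword in response
        return EvaluationResult(
            query=query,
            response=response,
            contexts=contexts,
            passing=passing,
            score=1.0 if passing else 0.0,
            feedback=f"keyword {'found' if passing else 'missing'}: {self._keyword}",
        )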
from __future__ import annotations
from .model_card import SparseEncoderModelCardData
from .SparseEncoder import SparseEncoder
from .trainer import SparseEncoderTrainer
from .training_args import SparseEncoderTrainingArguments
__all__ = [
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
]
|
from __future__ import annotations
from .model_card import SparseEncoderModelCardData
from .SparseEncoder import SparseEncoder
from .trainer import SparseEncoderTrainer
from .training_args import SparseEncoderTrainingArguments
__all__ = [
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
]
# TODO: Add tests for all the components
|
"""Provides the PanelChatPack."""
import os
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
ENVIRONMENT_VARIABLES = [
"GITHUB_TOKEN",
"OPENAI_API_KEY",
]
class PanelChatPack(BaseLlamaPack):
"""Panel chatbot pack."""
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
for variable in ENVIRONMENT_VARIABLES:
if variable not in os.environ:
raise ValueError("%s environment variable is not set", variable)
import panel as pn
if __name__ == "__main__":
# 'pytest tests' will fail if app is imported elsewhere
from app import create_chat_ui
pn.serve(create_chat_ui)
elif __name__.startswith("bokeh"):
from app import create_chat_ui
create_chat_ui().servable()
else:
print(
"To serve the Panel ChatBot please run this file with 'panel serve' or 'python'"
)
if __name__.startswith("bokeh") or __name__ == "__main__":
PanelChatPack().run()
|
"""Provides the PanelChatPack."""
import os
from typing import Any, Dict
from llama_index.core.llama_pack.base import BaseLlamaPack
ENVIRONMENT_VARIABLES = [
"GITHUB_TOKEN",
"OPENAI_API_KEY",
]
class PanelChatPack(BaseLlamaPack):
"""Panel chatbot pack."""
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
for variable in ENVIRONMENT_VARIABLES:
if variable not in os.environ:
raise ValueError("%s environment variable is not set", variable)
import panel as pn
if __name__ == "__main__":
# 'pytest tests' will fail if app is imported elsewhere
from app import create_chat_ui
pn.serve(create_chat_ui)
elif __name__.startswith("bokeh"):
from app import create_chat_ui
create_chat_ui().servable()
else:
print(
"To serve the Panel ChatBot please run this file with 'panel serve' or 'python'"
)
if __name__.startswith("bokeh") or __name__ == "__main__":
PanelChatPack().run()
|
from typing import Any
import torch
import enum
from torch._C import _to_dlpack as to_dlpack
__all__ = [
"DLDeviceType",
"from_dlpack",
]
class DLDeviceType(enum.IntEnum):
# Enums as in DLPack specification (aten/src/ATen/dlpack.h)
kDLCPU = 1,
kDLCUDA = 2,
kDLCUDAHost = 3,
kDLOpenCL = 4,
kDLVulkan = 7,
kDLMetal = 8,
kDLVPI = 9,
kDLROCM = 10,
kDLROCMHost = 11,
kDLExtDev = 12,
kDLCUDAManaged = 13,
kDLOneAPI = 14,
kDLWebGPU = 15,
kDLHexagon = 16,
kDLMAIA = 17,
torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule
Returns an opaque object (a "DLPack capsule") representing the tensor.
.. note::
``to_dlpack`` is a legacy DLPack interface. The capsule it returns
cannot be used for anything in Python other than use it as input to
``from_dlpack``. The more idiomatic use of DLPack is to call
``from_dlpack`` directly on the tensor object - this works when that
object has a ``__dlpack__`` method, which PyTorch and most other
libraries indeed have now.
.. warning::
Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
Behavior when a capsule is consumed multiple times is undefined.
Args:
tensor: a tensor to be exported
The DLPack capsule shares the tensor's memory.
""")
# TODO: add a typing.Protocol to be able to tell Mypy that only objects with
# __dlpack__ and __dlpack_device__ methods are accepted.
def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
"""from_dlpack(ext_tensor) -> Tensor
Converts a tensor from an external library into a ``torch.Tensor``.
The returned PyTorch tensor will share the memory with the input tensor
(which may have come from another library). Note that in-place operations
will therefore also affect the data of the input tensor. This may lead to
unexpected issues (e.g., other libraries may have read-only flags or
immutable data structures), so the user should only do this if they know
for sure that this is fine.
Args:
ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
The tensor or DLPack capsule to convert.
If ``ext_tensor`` is a tensor (or ndarray) object, it must support
the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
an opaque ``PyCapsule`` instance, typically produced by a
``to_dlpack`` function or method.
Examples::
>>> import torch.utils.dlpack
>>> t = torch.arange(4)
# Convert a tensor directly (supported in PyTorch >= 1.10)
>>> t2 = torch.from_dlpack(t)
>>> t2[:2] = -1 # show that memory is shared
>>> t2
tensor([-1, -1, 2, 3])
>>> t
tensor([-1, -1, 2, 3])
# The old-style DLPack usage, with an intermediate capsule object
>>> capsule = torch.utils.dlpack.to_dlpack(t)
>>> capsule
<capsule object "dltensor" at ...>
>>> t3 = torch.from_dlpack(capsule)
>>> t3
tensor([-1, -1, 2, 3])
>>> t3[0] = -9 # now we're sharing memory between 3 tensors
>>> t3
tensor([-9, -1, 2, 3])
>>> t2
tensor([-9, -1, 2, 3])
>>> t
tensor([-9, -1, 2, 3])
"""
if hasattr(ext_tensor, '__dlpack__'):
kwargs: dict[str, Any] = {}
kwargs["max_version"] = (1, 0)
device = ext_tensor.__dlpack_device__()
# device is either CUDA or ROCm, we need to pass the current
# stream
if device[0] in (DLDeviceType.kDLCUDA, DLDeviceType.kDLROCM):
stream = torch.cuda.current_stream(f'cuda:{device[1]}')
# cuda_stream is the pointer to the stream and it is a public
# attribute, but it is not documented
            # The array API specifies that the default legacy stream must be passed
# with a value of 1 for CUDA
# https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none
is_cuda = device[0] == DLDeviceType.kDLCUDA
            # Since PyTorch is not using PTDS by default, let's directly pass
# the legacy stream
stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream
kwargs["stream"] = stream_ptr
try:
# Try running __dlpack__ while specifying `max_version` argument.
dlpack = ext_tensor.__dlpack__(**kwargs)
except TypeError:
# If that doesn't work, try removing the `max_version` argument.
kwargs.pop("max_version")
dlpack = ext_tensor.__dlpack__(**kwargs)
else:
# Old versions just call the converter
dlpack = ext_tensor
return torch._C._from_dlpack(dlpack)
|
from typing import Any
import torch
import enum
from torch._C import _from_dlpack
from torch._C import _to_dlpack as to_dlpack
__all__ = [
"DLDeviceType",
"from_dlpack",
"to_dlpack",
]
class DLDeviceType(enum.IntEnum):
# Enums as in DLPack specification (aten/src/ATen/dlpack.h)
kDLCPU = 1,
kDLGPU = 2,
kDLCPUPinned = 3,
kDLOpenCL = 4,
kDLVulkan = 7,
kDLMetal = 8,
kDLVPI = 9,
kDLROCM = 10,
kDLExtDev = 12,
kDLOneAPI = 14,
torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule
Returns an opaque object (a "DLPack capsule") representing the tensor.
.. note::
``to_dlpack`` is a legacy DLPack interface. The capsule it returns
  cannot be used for anything in Python other than as input to
``from_dlpack``. The more idiomatic use of DLPack is to call
``from_dlpack`` directly on the tensor object - this works when that
object has a ``__dlpack__`` method, which PyTorch and most other
libraries indeed have now.
.. warning::
Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
Behavior when a capsule is consumed multiple times is undefined.
Args:
tensor: a tensor to be exported
The DLPack capsule shares the tensor's memory.
""")
# TODO: add a typing.Protocol to be able to tell Mypy that only objects with
# __dlpack__ and __dlpack_device__ methods are accepted.
def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
"""from_dlpack(ext_tensor) -> Tensor
Converts a tensor from an external library into a ``torch.Tensor``.
The returned PyTorch tensor will share the memory with the input tensor
(which may have come from another library). Note that in-place operations
will therefore also affect the data of the input tensor. This may lead to
unexpected issues (e.g., other libraries may have read-only flags or
immutable data structures), so the user should only do this if they know
for sure that this is fine.
Args:
ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
The tensor or DLPack capsule to convert.
If ``ext_tensor`` is a tensor (or ndarray) object, it must support
the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
an opaque ``PyCapsule`` instance, typically produced by a
``to_dlpack`` function or method.
Examples::
>>> import torch.utils.dlpack
>>> t = torch.arange(4)
# Convert a tensor directly (supported in PyTorch >= 1.10)
>>> t2 = torch.from_dlpack(t)
>>> t2[:2] = -1 # show that memory is shared
>>> t2
tensor([-1, -1, 2, 3])
>>> t
tensor([-1, -1, 2, 3])
# The old-style DLPack usage, with an intermediate capsule object
>>> capsule = torch.utils.dlpack.to_dlpack(t)
>>> capsule
<capsule object "dltensor" at ...>
>>> t3 = torch.from_dlpack(capsule)
>>> t3
tensor([-1, -1, 2, 3])
>>> t3[0] = -9 # now we're sharing memory between 3 tensors
>>> t3
tensor([-9, -1, 2, 3])
>>> t2
tensor([-9, -1, 2, 3])
>>> t
tensor([-9, -1, 2, 3])
"""
if hasattr(ext_tensor, '__dlpack__'):
device = ext_tensor.__dlpack_device__()
# device is either CUDA or ROCm, we need to pass the current
# stream
if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM):
stream = torch.cuda.current_stream(f'cuda:{device[1]}')
# cuda_stream is the pointer to the stream and it is a public
# attribute, but it is not documented
            # The array API specifies that the default legacy stream must be passed
# with a value of 1 for CUDA
# https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none
is_cuda = device[0] == DLDeviceType.kDLGPU
            # Since PyTorch is not using PTDS by default, let's directly pass
# the legacy stream
stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream
dlpack = ext_tensor.__dlpack__(stream=stream_ptr)
else:
dlpack = ext_tensor.__dlpack__()
else:
# Old versions just call the converter
dlpack = ext_tensor
return _from_dlpack(dlpack)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.8.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.8.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert MRA checkpoints from the original repository. URL: https://github.com/mlpen/mra-attention"""
import argparse
import torch
from transformers import MraConfig, MraForMaskedLM
def rename_key(orig_key):
if "model" in orig_key:
orig_key = orig_key.replace("model.", "")
if "norm1" in orig_key:
orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
if "norm2" in orig_key:
orig_key = orig_key.replace("norm2", "output.LayerNorm")
if "norm" in orig_key:
orig_key = orig_key.replace("norm", "LayerNorm")
if "transformer" in orig_key:
layer_num = orig_key.split(".")[0].split("_")[-1]
orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
if "mha.attn" in orig_key:
orig_key = orig_key.replace("mha.attn", "attention.self")
if "mha" in orig_key:
orig_key = orig_key.replace("mha", "attention")
if "W_q" in orig_key:
orig_key = orig_key.replace("W_q", "self.query")
if "W_k" in orig_key:
orig_key = orig_key.replace("W_k", "self.key")
if "W_v" in orig_key:
orig_key = orig_key.replace("W_v", "self.value")
if "ff.0" in orig_key:
orig_key = orig_key.replace("ff.0", "intermediate.dense")
if "ff.2" in orig_key:
orig_key = orig_key.replace("ff.2", "output.dense")
if "ff" in orig_key:
orig_key = orig_key.replace("ff", "output.dense")
if "mlm_class" in orig_key:
orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
if "mlm" in orig_key:
orig_key = orig_key.replace("mlm", "cls.predictions.transform")
if "backbone.backbone.encoders" in orig_key:
orig_key = orig_key.replace("backbone.backbone.encoders", "encoder.layer")
if "cls" not in orig_key:
orig_key = "mra." + orig_key
return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key)
if ("pooler" in key) or ("sen_class" in key):
continue
else:
orig_state_dict[rename_key(key)] = val
orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
orig_state_dict["mra.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
return orig_state_dict
def convert_mra_checkpoint(checkpoint_path, mra_config_file, pytorch_dump_path):
orig_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model_state_dict"]
config = MraConfig.from_json_file(mra_config_file)
model = MraForMaskedLM(config)
new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
print(model.load_state_dict(new_state_dict))
model.eval()
model.save_pretrained(pytorch_dump_path)
print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to Mra pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for Mra model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_mra_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert MRA checkpoints from the original repository. URL: https://github.com/mlpen/mra-attention"""
import argparse
import torch
from transformers import MraConfig, MraForMaskedLM
def rename_key(orig_key):
if "model" in orig_key:
orig_key = orig_key.replace("model.", "")
if "norm1" in orig_key:
orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
if "norm2" in orig_key:
orig_key = orig_key.replace("norm2", "output.LayerNorm")
if "norm" in orig_key:
orig_key = orig_key.replace("norm", "LayerNorm")
if "transformer" in orig_key:
layer_num = orig_key.split(".")[0].split("_")[-1]
orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
if "mha.attn" in orig_key:
orig_key = orig_key.replace("mha.attn", "attention.self")
if "mha" in orig_key:
orig_key = orig_key.replace("mha", "attention")
if "W_q" in orig_key:
orig_key = orig_key.replace("W_q", "self.query")
if "W_k" in orig_key:
orig_key = orig_key.replace("W_k", "self.key")
if "W_v" in orig_key:
orig_key = orig_key.replace("W_v", "self.value")
if "ff.0" in orig_key:
orig_key = orig_key.replace("ff.0", "intermediate.dense")
if "ff.2" in orig_key:
orig_key = orig_key.replace("ff.2", "output.dense")
if "ff" in orig_key:
orig_key = orig_key.replace("ff", "output.dense")
if "mlm_class" in orig_key:
orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
if "mlm" in orig_key:
orig_key = orig_key.replace("mlm", "cls.predictions.transform")
if "backbone.backbone.encoders" in orig_key:
orig_key = orig_key.replace("backbone.backbone.encoders", "encoder.layer")
if "cls" not in orig_key:
orig_key = "mra." + orig_key
return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key)
if ("pooler" in key) or ("sen_class" in key):
continue
else:
orig_state_dict[rename_key(key)] = val
orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
orig_state_dict["mra.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
return orig_state_dict
def convert_mra_checkpoint(checkpoint_path, mra_config_file, pytorch_dump_path):
orig_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model_state_dict"]
config = MraConfig.from_json_file(mra_config_file)
model = MraForMaskedLM(config)
new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
print(model.load_state_dict(new_state_dict))
model.eval()
model.save_pretrained(pytorch_dump_path)
print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to Mra pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for Mra model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_mra_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
|
"""
NumPy Array API compatibility library
This is a small wrapper around NumPy, CuPy, JAX, sparse and others that are
compatible with the Array API standard https://data-apis.org/array-api/latest/.
See also NEP 47 https://numpy.org/neps/nep-0047-array-api-standard.html.
Unlike array_api_strict, this is not a strict minimal implementation of the
Array API, but rather just an extension of the main NumPy namespace with
changes needed to be compliant with the Array API. See
https://numpy.org/doc/stable/reference/array_api.html for a full list of
changes. In particular, unlike array_api_strict, this package does not use a
separate Array object, but rather just uses numpy.ndarray directly.
Library authors using the Array API may wish to test against array_api_strict
to ensure they are not using functionality outside of the standard, but prefer
this implementation for the default when working with NumPy arrays.
"""
__version__ = '1.12.0'
from .common import * # noqa: F401, F403
|
"""
NumPy Array API compatibility library
This is a small wrapper around NumPy, CuPy, JAX, sparse and others that are
compatible with the Array API standard https://data-apis.org/array-api/latest/.
See also NEP 47 https://numpy.org/neps/nep-0047-array-api-standard.html.
Unlike array_api_strict, this is not a strict minimal implementation of the
Array API, but rather just an extension of the main NumPy namespace with
changes needed to be compliant with the Array API. See
https://numpy.org/doc/stable/reference/array_api.html for a full list of
changes. In particular, unlike array_api_strict, this package does not use a
separate Array object, but rather just uses numpy.ndarray directly.
Library authors using the Array API may wish to test against array_api_strict
to ensure they are not using functionality outside of the standard, but prefer
this implementation for the default when working with NumPy arrays.
"""
__version__ = '1.11.2'
from .common import * # noqa: F401, F403
|
import json
from typing import List
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
class XataChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Xata database."""
def __init__(
self,
session_id: str,
db_url: str,
api_key: str,
branch_name: str = "main",
table_name: str = "messages",
create_table: bool = True,
) -> None:
"""Initialize with Xata client."""
try:
from xata.client import XataClient
except ImportError:
raise ImportError(
"Could not import xata python package. "
"Please install it with `pip install xata`."
)
self._client = XataClient(
api_key=api_key, db_url=db_url, branch_name=branch_name
)
self._table_name = table_name
self._session_id = session_id
if create_table:
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
r = self._client.table().get_schema(self._table_name)
if r.status_code <= 299:
return
if r.status_code != 404:
raise Exception(
f"Error checking if table exists in Xata: {r.status_code} {r}"
)
r = self._client.table().create(self._table_name)
if r.status_code > 299:
raise Exception(f"Error creating table in Xata: {r.status_code} {r}")
r = self._client.table().set_schema(
self._table_name,
payload={
"columns": [
{"name": "sessionId", "type": "string"},
{"name": "type", "type": "string"},
{"name": "role", "type": "string"},
{"name": "content", "type": "text"},
{"name": "name", "type": "string"},
{"name": "additionalKwargs", "type": "json"},
]
},
)
if r.status_code > 299:
raise Exception(f"Error setting table schema in Xata: {r.status_code} {r}")
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the Xata table"""
msg = message_to_dict(message)
r = self._client.records().insert(
self._table_name,
{
"sessionId": self._session_id,
"type": msg["type"],
"content": message.content,
"additionalKwargs": json.dumps(message.additional_kwargs),
"role": msg["data"].get("role"),
"name": msg["data"].get("name"),
},
)
if r.status_code > 299:
raise Exception(f"Error adding message to Xata: {r.status_code} {r}")
@property
def messages(self) -> List[BaseMessage]: # type: ignore[override]
r = self._client.data().query(
self._table_name,
payload={
"filter": {
"sessionId": self._session_id,
},
"sort": {"xata.createdAt": "asc"},
},
)
if r.status_code != 200:
raise Exception(f"Error running query: {r.status_code} {r}")
msgs = messages_from_dict(
[
{
"type": m["type"],
"data": {
"content": m["content"],
"role": m.get("role"),
"name": m.get("name"),
"additional_kwargs": json.loads(m["additionalKwargs"]),
},
}
for m in r["records"]
]
)
return msgs
def clear(self) -> None:
"""Delete session from Xata table."""
while True:
r = self._client.data().query(
self._table_name,
payload={
"columns": ["id"],
"filter": {
"sessionId": self._session_id,
},
},
)
if r.status_code != 200:
raise Exception(f"Error running query: {r.status_code} {r}")
ids = [rec["id"] for rec in r["records"]]
if len(ids) == 0:
break
operations = [
{"delete": {"table": self._table_name, "id": id}} for id in ids
]
self._client.records().transaction(payload={"operations": operations})
|
import json
from typing import List
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.messages import (
BaseMessage,
message_to_dict,
messages_from_dict,
)
class XataChatMessageHistory(BaseChatMessageHistory):
"""Chat message history stored in a Xata database."""
def __init__(
self,
session_id: str,
db_url: str,
api_key: str,
branch_name: str = "main",
table_name: str = "messages",
create_table: bool = True,
) -> None:
"""Initialize with Xata client."""
try:
from xata.client import XataClient
except ImportError:
raise ImportError(
"Could not import xata python package. "
"Please install it with `pip install xata`."
)
self._client = XataClient(
api_key=api_key, db_url=db_url, branch_name=branch_name
)
self._table_name = table_name
self._session_id = session_id
if create_table:
self._create_table_if_not_exists()
def _create_table_if_not_exists(self) -> None:
r = self._client.table().get_schema(self._table_name)
if r.status_code <= 299:
return
if r.status_code != 404:
raise Exception(
f"Error checking if table exists in Xata: {r.status_code} {r}"
)
r = self._client.table().create(self._table_name)
if r.status_code > 299:
raise Exception(f"Error creating table in Xata: {r.status_code} {r}")
r = self._client.table().set_schema(
self._table_name,
payload={
"columns": [
{"name": "sessionId", "type": "string"},
{"name": "type", "type": "string"},
{"name": "role", "type": "string"},
{"name": "content", "type": "text"},
{"name": "name", "type": "string"},
{"name": "additionalKwargs", "type": "json"},
]
},
)
if r.status_code > 299:
raise Exception(f"Error setting table schema in Xata: {r.status_code} {r}")
def add_message(self, message: BaseMessage) -> None:
"""Append the message to the Xata table"""
msg = message_to_dict(message)
r = self._client.records().insert(
self._table_name,
{
"sessionId": self._session_id,
"type": msg["type"],
"content": message.content,
"additionalKwargs": json.dumps(message.additional_kwargs),
"role": msg["data"].get("role"),
"name": msg["data"].get("name"),
},
)
if r.status_code > 299:
raise Exception(f"Error adding message to Xata: {r.status_code} {r}")
@property
def messages(self) -> List[BaseMessage]: # type: ignore
r = self._client.data().query(
self._table_name,
payload={
"filter": {
"sessionId": self._session_id,
},
"sort": {"xata.createdAt": "asc"},
},
)
if r.status_code != 200:
raise Exception(f"Error running query: {r.status_code} {r}")
msgs = messages_from_dict(
[
{
"type": m["type"],
"data": {
"content": m["content"],
"role": m.get("role"),
"name": m.get("name"),
"additional_kwargs": json.loads(m["additionalKwargs"]),
},
}
for m in r["records"]
]
)
return msgs
def clear(self) -> None:
"""Delete session from Xata table."""
while True:
r = self._client.data().query(
self._table_name,
payload={
"columns": ["id"],
"filter": {
"sessionId": self._session_id,
},
},
)
if r.status_code != 200:
raise Exception(f"Error running query: {r.status_code} {r}")
ids = [rec["id"] for rec in r["records"]]
if len(ids) == 0:
break
operations = [
{"delete": {"table": self._table_name, "id": id}} for id in ids
]
self._client.records().transaction(payload={"operations": operations})
|
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredODTLoader(UnstructuredFileLoader):
"""Load `OpenOffice ODT` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredODTLoader
loader = UnstructuredODTLoader(
"example.odt", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-odt
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the file to load.
            mode: The mode to use when loading the file. Can be one of "single"
                or "elements". Default is "single".
            **unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
validate_unstructured_version(min_unstructured_version="0.6.3")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.odt import partition_odt
return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
|
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredODTLoader(UnstructuredFileLoader):
"""Load `OpenOffice ODT` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain_community.document_loaders import UnstructuredODTLoader
loader = UnstructuredODTLoader(
"example.odt", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-odt
"""
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the file to load.
            mode: The mode to use when loading the file. Can be one of "single"
                or "elements". Default is "single".
            **unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
validate_unstructured_version(min_unstructured_version="0.6.3")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.odt import partition_odt
return partition_odt(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
|
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import dataclass, fields
from typing import TypeVar
from typing_extensions import dataclass_transform
T = TypeVar("T", bound="_Union")
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
# If you turn a schema class that inherits from union into a dataclass, please use
# this decorator to configure it. It's safe, faster and allows code sharing.
#
# For example, _union_dataclass customizes the __eq__ method to only check the type
# and value property instead of the default implementation of dataclass, which goes
# through every field in the dataclass.
@dataclass_transform(eq_default=False)
def _union_dataclass(cls: type[T]) -> type[T]:
    assert issubclass(cls, _Union), f"{cls} must inherit from {_Union}."
return dataclass(repr=False, eq=False)(cls)
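# A minimal usage sketch (hypothetical schema class, for illustration only):
#
#   @_union_dataclass
#   class Argument(_Union):
#       as_int: int
#       as_bool: bool
#
#   arg = Argument.create(as_int=3)
#   assert arg.type == "as_int" and arg.value == 3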
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Union):
return False
return self.type == other.type and self.value == other.value
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import fields
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** are usually used to transform many Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-block::
Document
""" # noqa: E501
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import (
BeautifulSoupTransformer,
DoctranPropertyExtractor,
DoctranQATransformer,
DoctranTextTranslator,
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
GoogleTranslateTransformer,
Html2TextTransformer,
LongContextReorder,
NucliaTextTransformer,
OpenAIMetadataTagger,
get_stateful_documents,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BeautifulSoupTransformer": "langchain_community.document_transformers",
"DoctranQATransformer": "langchain_community.document_transformers",
"DoctranTextTranslator": "langchain_community.document_transformers",
"DoctranPropertyExtractor": "langchain_community.document_transformers",
"EmbeddingsClusteringFilter": "langchain_community.document_transformers",
"EmbeddingsRedundantFilter": "langchain_community.document_transformers",
"GoogleTranslateTransformer": "langchain_community.document_transformers",
"get_stateful_documents": "langchain_community.document_transformers",
"LongContextReorder": "langchain_community.document_transformers",
"NucliaTextTransformer": "langchain_community.document_transformers",
"OpenAIMetadataTagger": "langchain_community.document_transformers",
"Html2TextTransformer": "langchain_community.document_transformers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
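# Example (sketch): `from langchain.document_transformers import Html2TextTransformer`
# goes through __getattr__ above; the importer typically emits a deprecation warning
# and returns the class from langchain_community.document_transformers.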
__all__ = [
"BeautifulSoupTransformer",
"DoctranPropertyExtractor",
"DoctranQATransformer",
"DoctranTextTranslator",
"EmbeddingsClusteringFilter",
"EmbeddingsRedundantFilter",
"GoogleTranslateTransformer",
"Html2TextTransformer",
"LongContextReorder",
"NucliaTextTransformer",
"OpenAIMetadataTagger",
"get_stateful_documents",
]
|
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** are usually used to transform many Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-block::
Document
""" # noqa: E501
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import (
BeautifulSoupTransformer,
DoctranPropertyExtractor,
DoctranQATransformer,
DoctranTextTranslator,
EmbeddingsClusteringFilter,
EmbeddingsRedundantFilter,
GoogleTranslateTransformer,
Html2TextTransformer,
LongContextReorder,
NucliaTextTransformer,
OpenAIMetadataTagger,
get_stateful_documents,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BeautifulSoupTransformer": "langchain_community.document_transformers",
"DoctranQATransformer": "langchain_community.document_transformers",
"DoctranTextTranslator": "langchain_community.document_transformers",
"DoctranPropertyExtractor": "langchain_community.document_transformers",
"EmbeddingsClusteringFilter": "langchain_community.document_transformers",
"EmbeddingsRedundantFilter": "langchain_community.document_transformers",
"GoogleTranslateTransformer": "langchain_community.document_transformers",
"get_stateful_documents": "langchain_community.document_transformers",
"LongContextReorder": "langchain_community.document_transformers",
"NucliaTextTransformer": "langchain_community.document_transformers",
"OpenAIMetadataTagger": "langchain_community.document_transformers",
"Html2TextTransformer": "langchain_community.document_transformers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BeautifulSoupTransformer",
"DoctranQATransformer",
"DoctranTextTranslator",
"DoctranPropertyExtractor",
"EmbeddingsClusteringFilter",
"EmbeddingsRedundantFilter",
"GoogleTranslateTransformer",
"get_stateful_documents",
"LongContextReorder",
"NucliaTextTransformer",
"OpenAIMetadataTagger",
"Html2TextTransformer",
]
|
import os
from pathlib import Path
import pytest
from pytest_kind import KindCluster, cluster
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
# The default version broke cni at some point. That's why we need to specify the version here.
# This can and probably should be put in an environment variable.
cluster.KIND_VERSION = 'v0.11.1'
# TODO: Can we get jina image to build here as well?
@pytest.fixture(scope='session', autouse=True)
def build_and_load_images(k8s_cluster_v2: KindClusterWrapperV2) -> None:
CUR_DIR = Path(__file__).parent
k8s_cluster_v2.build_and_load_docker_image(
str(CUR_DIR / 'test-instrumentation'), 'test-instrumentation', 'test-pip'
)
k8s_cluster_v2.load_docker_image(image_name='jinaai/jina', tag='test-pip')
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed
del os.environ['JINA_GATEWAY_IMAGE']
k8s_cluster_v2.remove_docker_image('test-instrumentation', 'test-pip')
@pytest.fixture(scope='session')
def k8s_cluster_v2(kind_cluster: KindCluster) -> KindClusterWrapperV2:
return KindClusterWrapperV2(kind_cluster, JinaLogger('kubernetes-cluster-logger'))
@pytest.fixture(scope='session')
def otel_test_namespace(k8s_cluster_v2: KindClusterWrapperV2) -> str:
"""
Returns a namespace with the otlp-collector, jaeger and prometheus services deployed.
Service endpoints:
- otlp-collector:4317 - OTLP GRPC metrics endpoint
- jaeger:16686 - API endpoint
- jaeger:4317 - OTLP GRPC tracing endpoint
- prometheus:9090 - API endpoint
- otlphttp-mirror - OTLP HTTP metrics endpoint
Nice for development and debugging:
- jaeger:16686 - UI endpoint
- prometheus:9090 - UI endpoint
- grafana:3000 - UI endpoint
Returns:
The namespace name
"""
NAMESPACE = 'otel-test'
ARTIFACT_DIR = os.path.join(os.path.dirname(__file__), NAMESPACE)
k8s_cluster_v2.deploy_from_dir(dir=ARTIFACT_DIR, namespace=NAMESPACE)
return NAMESPACE
|
import os
from pathlib import Path
import pytest
from pytest_kind import KindCluster, cluster
from jina.logging.logger import JinaLogger
from tests.k8s_otel.kind_wrapper import KindClusterWrapperV2
# The default version broke cni at some point. That's why we need to specify the version here.
# This can and probably should be put in an environment variable.
cluster.KIND_VERSION = 'v0.11.1'
# TODO: Can we get jina image to build here as well?
@pytest.fixture(scope='session', autouse=True)
def build_and_load_images(k8s_cluster_v2: KindClusterWrapperV2) -> None:
CUR_DIR = Path(__file__).parent
k8s_cluster_v2.build_and_load_docker_image(
str(CUR_DIR / 'test-instrumentation'), 'test-instrumentation', 'test-pip'
)
k8s_cluster_v2.load_docker_image(image_name='jinaai/jina', tag='test-pip')
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
k8s_cluster_v2.remove_docker_image('test-instrumentation', 'test-pip')
@pytest.fixture(scope='session')
def k8s_cluster_v2(kind_cluster: KindCluster) -> KindClusterWrapperV2:
return KindClusterWrapperV2(kind_cluster, JinaLogger('kubernetes-cluster-logger'))
@pytest.fixture(scope='session')
def otel_test_namespace(k8s_cluster_v2: KindClusterWrapperV2) -> str:
"""
Returns a namespace with the otlp-collector, jaeger and prometheus services deployed.
Service endpoints:
- otlp-collector:4317 - OTLP GRPC metrics endpoint
- jaeger:16686 - API endpoint
- jaeger:4317 - OTLP GRPC tracing endpoint
- prometheus:9090 - API endpoint
- otlphttp-mirror - OTLP HTTP metrics endpoint
Nice for development and debugging:
- jaeger:16686 - UI endpoint
- prometheus:9090 - UI endpoint
- grafana:3000 - UI endpoint
Returns:
The namespace name
"""
NAMESPACE = 'otel-test'
ARTIFACT_DIR = os.path.join(os.path.dirname(__file__), NAMESPACE)
k8s_cluster_v2.deploy_from_dir(dir=ARTIFACT_DIR, namespace=NAMESPACE)
return NAMESPACE
|
import os
import numpy as np
from PIL import Image
from docarray import Document
from docarray.dataclasses.getter import (
audio_getter,
image_getter,
json_getter,
text_getter,
uri_getter,
)
from docarray.dataclasses.enums import DocumentMetadata, ImageType
cur_dir = os.path.dirname(os.path.abspath(__file__))
IMAGE_URI = os.path.join(cur_dir, 'toydata/test.png')
def test_image_deserializer():
doc = Document()
doc._metadata[DocumentMetadata.IMAGE_TYPE] = ImageType.URI
doc.uri = 'image_uri'
assert image_getter(doc) == 'image_uri'
doc._metadata[DocumentMetadata.IMAGE_TYPE] = ImageType.NDARRAY
im = Image.open(IMAGE_URI)
doc.tensor = np.asarray(im)
assert np.all(image_getter(doc) == np.asarray(im))
doc._metadata[DocumentMetadata.IMAGE_TYPE] = ImageType.PIL
assert np.all(image_getter(doc) == Image.fromarray(np.asarray(im)))
def test_text_deserializer():
doc = Document(text='text')
assert (
text_getter(
doc,
)
== 'text'
)
def test_uri_deserializer():
doc = Document(uri='https://jina.ai')
assert (
uri_getter(
doc,
)
== 'https://jina.ai'
)
def test_json_deserializer():
doc = Document()
doc._metadata[DocumentMetadata.JSON_TYPE] = ''
doc.tags = 'attribute'
assert json_getter(doc) == 'attribute'
def test_audio_deserializer():
doc = Document()
ad = Image.open(IMAGE_URI)
doc.tensor = np.asarray(ad)
assert np.all(audio_getter(doc) == Image.fromarray(np.asarray(ad)))
|
import os
import numpy as np
from PIL import Image
from docarray import Document
from docarray.dataclasses.getter import (
audio_getter,
image_getter,
json_getter,
text_getter,
uri_getter,
)
cur_dir = os.path.dirname(os.path.abspath(__file__))
IMAGE_URI = os.path.join(cur_dir, 'toydata/test.png')
def test_image_deserializer():
doc = Document()
doc._metadata['image_type'] = 'uri'
doc.uri = 'image_uri'
assert image_getter(doc) == 'image_uri'
doc._metadata['image_type'] = 'ndarray'
im = Image.open(IMAGE_URI)
doc.tensor = np.asarray(im)
assert np.all(image_getter(doc) == np.asarray(im))
doc._metadata['image_type'] = 'PIL'
assert np.all(image_getter(doc) == Image.fromarray(np.asarray(im)))
def test_text_deserializer():
doc = Document(text='text')
assert (
text_getter(
doc,
)
== 'text'
)
def test_uri_deserializer():
doc = Document(uri='https://jina.ai')
assert (
uri_getter(
doc,
)
== 'https://jina.ai'
)
def test_json_deserializer():
doc = Document()
doc._metadata['json_type'] = ''
doc.tags = 'attribute'
assert json_getter(doc) == 'attribute'
def test_audio_deserializer():
doc = Document()
ad = Image.open(IMAGE_URI)
doc.tensor = np.asarray(ad)
assert np.all(audio_getter(doc) == Image.fromarray(np.asarray(ad)))
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache # noqa F401
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
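# e.g. _hash_python_lines(["x = 1  # set x", "", "y = 2"]) strips the comment, drops the
# empty line, and returns the SHA-256 hex digest of "x = 1  \ny = 2".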
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
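# e.g. ".jsonl" files resolve to the "json" packaged module with no extra kwargs,
# while ".tsv" resolves to "csv" with sep="\t".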
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import inspect
import re
from typing import Dict, List
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache # noqa F401
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.core.utils import infer_torch_device
DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH = 512
class SentenceTransformerRerank(BaseNodePostprocessor):
model: str = Field(description="Sentence transformer model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
device: str = Field(
default="cpu",
description="Device to use for sentence transformer.",
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
trust_remote_code: bool = Field(
default=False,
description="Whether to trust remote code.",
)
_model: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "cross-encoder/stsb-distilroberta-base",
device: Optional[str] = None,
keep_retrieval_score: bool = False,
trust_remote_code: bool = True,
):
try:
from sentence_transformers import CrossEncoder # pants: no-infer-dep
except ImportError:
raise ImportError(
"Cannot import sentence-transformers or torch package,",
"please `pip install torch sentence-transformers`",
)
device = infer_torch_device() if device is None else device
super().__init__(
top_n=top_n,
model=model,
device=device,
keep_retrieval_score=keep_retrieval_score,
)
self._model = CrossEncoder(
model,
max_length=DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH,
device=device,
trust_remote_code=trust_remote_code,
)
@classmethod
def class_name(cls) -> str:
return "SentenceTransformerRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query_and_nodes = [
(
query_bundle.query_str,
node.node.get_content(metadata_mode=MetadataMode.EMBED),
)
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._model.predict(query_and_nodes)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = score
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
return new_nodes
|
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
from llama_index.core.utils import infer_torch_device
DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH = 512
class SentenceTransformerRerank(BaseNodePostprocessor):
model: str = Field(description="Sentence transformer model name.")
top_n: int = Field(description="Number of nodes to return sorted by score.")
device: str = Field(
default="cpu",
description="Device to use for sentence transformer.",
)
keep_retrieval_score: bool = Field(
default=False,
description="Whether to keep the retrieval score in metadata.",
)
trust_remote_code: bool = Field(
default=False,
description="Whether to trust remote code.",
)
_model: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "cross-encoder/stsb-distilroberta-base",
device: Optional[str] = None,
keep_retrieval_score: Optional[bool] = False,
trust_remote_code: Optional[bool] = True,
):
try:
from sentence_transformers import CrossEncoder # pants: no-infer-dep
except ImportError:
raise ImportError(
"Cannot import sentence-transformers or torch package,",
"please `pip install torch sentence-transformers`",
)
device = infer_torch_device() if device is None else device
super().__init__(
top_n=top_n,
model=model,
device=device,
keep_retrieval_score=keep_retrieval_score,
)
self._model = CrossEncoder(
model,
max_length=DEFAULT_SENTENCE_TRANSFORMER_MAX_LENGTH,
device=device,
trust_remote_code=trust_remote_code,
)
@classmethod
def class_name(cls) -> str:
return "SentenceTransformerRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
query_and_nodes = [
(
query_bundle.query_str,
node.node.get_content(metadata_mode=MetadataMode.EMBED),
)
for node in nodes
]
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
scores = self._model.predict(query_and_nodes)
assert len(scores) == len(nodes)
for node, score in zip(nodes, scores):
if self.keep_retrieval_score:
# keep the retrieval score in metadata
node.node.metadata["retrieval_score"] = node.score
node.score = score
new_nodes = sorted(nodes, key=lambda x: -x.score if x.score else 0)[
: self.top_n
]
event.on_end(payload={EventPayload.NODES: new_nodes})
return new_nodes
|
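Below is a minimal usage sketch for the reranker defined above. It assumes the class is importable as `llama_index.core.postprocessor.SentenceTransformerRerank` and that `torch`, `sentence-transformers`, and `llama-index-core` are installed; the node texts and query are illustrative only.
# Hedged usage sketch for the SentenceTransformerRerank shown above.
# The import path and package availability are assumptions for illustration.
from llama_index.core.postprocessor import SentenceTransformerRerank
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode

nodes = [
    NodeWithScore(node=TextNode(text="The cat sat on the mat."), score=0.2),
    NodeWithScore(node=TextNode(text="Paris is the capital of France."), score=0.9),
]

reranker = SentenceTransformerRerank(
    top_n=1,
    model="cross-encoder/stsb-distilroberta-base",
    keep_retrieval_score=True,
)

# The cross-encoder scores every (query, node text) pair and keeps the top_n nodes;
# the original retrieval score is preserved in node metadata.
reranked = reranker.postprocess_nodes(
    nodes, query_bundle=QueryBundle(query_str="What is the capital of France?")
)
for n in reranked:
    print(n.score, n.node.metadata.get("retrieval_score"), n.node.get_content())
|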
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestSingleStageDetector(TestCase):
@parameterized.expand([
'retinanet/retinanet_r18_fpn_1x_coco.py',
'centernet/centernet_resnet18_140e_coco.py',
'fsaf/fsaf_r50_fpn_1x_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
assert detector.device.type == 'cpu'
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')),
])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda')),
('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet import * # noqa
from mmdet.core import DetDataSample
from .utils import demo_mm_inputs, get_detector_cfg
class TestSingleStageDetector(TestCase):
@parameterized.expand([
'retinanet/retinanet_r18_fpn_1x_coco.py',
'centernet/centernet_resnet18_140e_coco.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
assert detector.backbone
assert detector.neck
assert detector.bbox_head
assert detector.device.type == 'cpu'
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_train(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
detector.init_weights()
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward train
losses = detector.forward(packed_inputs, return_loss=True)
assert isinstance(losses, dict)
# Test forward_dummy
batch = torch.ones((1, 3, 64, 64)).to(device=device)
out = detector.forward_dummy(batch)
assert isinstance(out, tuple)
@parameterized.expand([
('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')),
('centernet/centernet_resnet18_140e_coco.py', ('cpu', 'cuda'))
])
def test_single_stage_forward_test(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.models import build_detector
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = build_detector(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
assert detector.device.type == device
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
packed_inputs, return_loss=False)
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
|
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross encoder
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
# We want to compute the similarity between the query sentence
query = "A man is eating pasta."
# With all sentences in the corpus
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# 1. We rank all sentences in the corpus for the query
ranks = model.rank(query, corpus)
# Print the scores
print("Query:", query)
for rank in ranks:
print(f"{rank['score']:.2f}\t{corpus[rank['corpus_id']]}")
# 2. Alternatively, you can also manually compute the score between two sentences
sentence_combinations = [[query, sentence] for sentence in corpus]
scores = model.predict(sentence_combinations)
# Sort the scores in decreasing order to get the corpus indices
ranked_indices = np.argsort(scores)[::-1]
print("scores:", scores)
print("indices:", ranked_indices)
|
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross encoder
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
# We want to compute the similarity between the query sentence
query = "A man is eating pasta."
# With all sentences in the corpus
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# 1. We rank all sentences in the corpus for the query
ranks = model.rank(query, corpus)
# Print the scores
print("Query:", query)
for rank in ranks:
print(f"{rank['score']:.2f}\t{corpus[rank['corpus_id']]}")
# 2. Alternatively, you can also manually compute the score between two sentences
sentence_combinations = [[query, sentence] for sentence in corpus]
scores = model.predict(sentence_combinations)
# Sort the scores in decreasing order to get the corpus indices
ranked_indices = np.argsort(scores)[::-1]
print("scores:", scores)
print("indices:", ranked_indices)
|
from typing import Any
from typing_extensions import Self
import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.quantized.dynamic as nnqd
__all__ = ["LinearReLU"]
class LinearReLU(nnqd.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules that can be used
for dynamic quantization.
    Supports both FP16 and INT8 quantization.
We adopt the same interface as :class:`torch.ao.nn.quantized.dynamic.Linear`.
Attributes:
Same as torch.ao.nn.quantized.dynamic.Linear
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = nni.LinearReLU
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
dtype: torch.dtype = torch.qint8,
) -> None:
super().__init__(in_features, out_features, bias, dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self._packed_params.dtype == torch.qint8:
            # TODO check if we should set reduce_range = True by default here
Y = torch.ops.quantized.linear_relu_dynamic(
x, self._packed_params._packed_params, reduce_range=True
)
elif self._packed_params.dtype == torch.float16:
Y = torch.ops.quantized.linear_relu_dynamic_fp16(
x, self._packed_params._packed_params
)
else:
raise RuntimeError("Unsupported dtype on dynamic quantized linear relu!")
return Y.to(x.dtype)
def _get_name(self) -> str:
return "DynamicQuantizedLinearReLU"
@classmethod
def from_float(
cls, mod: torch.nn.Module, use_precomputed_fake_quant: bool = False
) -> Self:
return super().from_float(
mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
@classmethod
def from_reference(cls, ref_qlinear_relu: Any) -> Self: # type: ignore[override]
return super().from_reference(ref_qlinear_relu[0])
|
# mypy: allow-untyped-defs
import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.quantized.dynamic as nnqd
__all__ = ["LinearReLU"]
class LinearReLU(nnqd.Linear):
r"""
A LinearReLU module fused from Linear and ReLU modules that can be used
for dynamic quantization.
    Supports both FP16 and INT8 quantization.
We adopt the same interface as :class:`torch.ao.nn.quantized.dynamic.Linear`.
Attributes:
Same as torch.ao.nn.quantized.dynamic.Linear
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.intrinsic.quantized.dynamic.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment]
def __init__(self, in_features, out_features, bias=True, dtype=torch.qint8):
super().__init__(in_features, out_features, bias, dtype)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self._packed_params.dtype == torch.qint8:
            # TODO check if we should set reduce_range = True by default here
Y = torch.ops.quantized.linear_relu_dynamic(
x, self._packed_params._packed_params, reduce_range=True
)
elif self._packed_params.dtype == torch.float16:
Y = torch.ops.quantized.linear_relu_dynamic_fp16(
x, self._packed_params._packed_params
)
else:
raise RuntimeError("Unsupported dtype on dynamic quantized linear relu!")
return Y.to(x.dtype)
def _get_name(self):
return "DynamicQuantizedLinearReLU"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
return super().from_float(
mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
@classmethod
def from_reference(cls, ref_qlinear_relu): # type: ignore[override]
return super().from_reference(ref_qlinear_relu[0])
|
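A short, hedged sketch of using the fused dynamically quantized module above directly. It assumes a PyTorch build with a quantized engine available (e.g. fbgemm on x86); the layer sizes are illustrative.
# Hedged sketch: direct use of the fused dynamic-quantized LinearReLU.
# Requires a quantized engine (e.g. fbgemm); sizes are arbitrary examples.
import torch
import torch.ao.nn.intrinsic.quantized.dynamic as nniqd

m = nniqd.LinearReLU(20, 30)      # INT8 dynamic quantization by default
x = torch.randn(128, 20)
y = m(x)                          # linear + ReLU fused into one quantized op
print(y.size())                   # torch.Size([128, 30])
print(torch.all(y >= 0).item())   # True, because of the fused ReLU
|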
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDocument, DocumentArray, DocumentArrayStacked
from docarray.typing import TorchTensor
from docarray.utils._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDocument)
class MultiModalDataset(Dataset, Generic[T_doc]):
"""
A dataset that can be used inside a PyTorch DataLoader.
In other words, it implements the PyTorch Dataset interface.
:param da: the DocumentArray to be used as the dataset
:param preprocessing: a dictionary of field names and preprocessing functions
The preprocessing dictionary passed to the constructor consists of keys that are
field names and values that are functions that take a single argument and return
a single argument.
EXAMPLE USAGE
.. code-block:: python
from torch.utils.data import DataLoader
from docarray import DocumentArray
from docarray.data import MultiModalDataset
from docarray.documents import Text
def prepend_number(text: str):
return f"Number {text}"
da = DocumentArray[Text](Text(text=str(i)) for i in range(16))
ds = MultiModalDataset[Text](da, preprocessing={'text': prepend_number})
loader = DataLoader(ds, batch_size=4, collate_fn=MultiModalDataset[Text].collate_fn)
for batch in loader:
print(batch.text)
Nested fields can be accessed by using dot notation.
The document itself can be accessed using the empty string as the key.
Transformations that operate on reference types (such as Documents) can optionally
not return a value.
The transformations will be applied according to their order in the dictionary.
EXAMPLE USAGE
.. code-block:: python
import torch
from torch.utils.data import DataLoader
from docarray import DocumentArray, BaseDocument
from docarray.data import MultiModalDataset
from docarray.documents import Text
class Thesis(BaseDocument):
title: Text
class Student(BaseDocument):
thesis: Thesis
def embed_title(title: Text):
title.embedding = torch.ones(4)
def normalize_embedding(thesis: Thesis):
thesis.title.embedding = thesis.title.embedding / thesis.title.embedding.norm()
def add_nonsense(student: Student):
student.thesis.title.embedding = student.thesis.title.embedding + int(
student.thesis.title.text
)
da = DocumentArray[Student](Student(thesis=Thesis(title=str(i))) for i in range(16))
ds = MultiModalDataset[Student](
da,
preprocessing={
"thesis.title": embed_title,
"thesis": normalize_embedding,
"": add_nonsense,
},
)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
for batch in loader:
print(batch.thesis.title.embedding)
"""
document_type: Optional[Type[BaseDocument]] = None
__typed_ds__: Dict[Type[BaseDocument], Type['MultiModalDataset']] = {}
def __init__(
self, da: 'DocumentArray[T_doc]', preprocessing: Dict[str, Callable]
) -> None:
self.da = da
self._preprocessing = preprocessing
def __len__(self):
return len(self.da)
def __getitem__(self, item: int):
doc = self.da[item].copy(deep=True)
for field, preprocess in self._preprocessing.items():
if len(field) == 0:
doc = preprocess(doc) or doc
else:
acc_path = field.split('.')
_field_ref = doc
for attr in acc_path[:-1]:
_field_ref = getattr(_field_ref, attr)
attr = acc_path[-1]
value = getattr(_field_ref, attr)
setattr(_field_ref, attr, preprocess(value) or value)
return doc
@classmethod
def collate_fn(cls, batch: List[T_doc]):
doc_type = cls.document_type
if doc_type:
batch_da = DocumentArrayStacked[doc_type]( # type: ignore
batch,
tensor_type=TorchTensor,
)
else:
batch_da = DocumentArrayStacked(batch, tensor_type=TorchTensor)
return batch_da
@classmethod
def __class_getitem__(cls, item: Type[BaseDocument]) -> Type['MultiModalDataset']:
if not issubclass(item, BaseDocument):
raise ValueError(
f'{cls.__name__}[item] item should be a Document not a {item} '
)
if item not in cls.__typed_ds__:
global _TypedDataset
class _TypedDataset(cls): # type: ignore
document_type = item
change_cls_name(
_TypedDataset, f'{cls.__name__}[{item.__name__}]', globals()
)
cls.__typed_ds__[item] = _TypedDataset
return cls.__typed_ds__[item]
|
from typing import Callable, Dict, Generic, List, Optional, Type, TypeVar
from torch.utils.data import Dataset
from docarray import BaseDocument, DocumentArray
from docarray.typing import TorchTensor
from docarray.utils._typing import change_cls_name
T_doc = TypeVar('T_doc', bound=BaseDocument)
class MultiModalDataset(Dataset, Generic[T_doc]):
"""
A dataset that can be used inside a PyTorch DataLoader.
In other words, it implements the PyTorch Dataset interface.
:param da: the DocumentArray to be used as the dataset
:param preprocessing: a dictionary of field names and preprocessing functions
The preprocessing dictionary passed to the constructor consists of keys that are
field names and values that are functions that take a single argument and return
a single argument.
EXAMPLE USAGE
.. code-block:: python
from torch.utils.data import DataLoader
from docarray import DocumentArray
from docarray.data import MultiModalDataset
from docarray.documents import Text
def prepend_number(text: str):
return f"Number {text}"
da = DocumentArray[Text](Text(text=str(i)) for i in range(16))
ds = MultiModalDataset[Text](da, preprocessing={'text': prepend_number})
loader = DataLoader(ds, batch_size=4, collate_fn=MultiModalDataset[Text].collate_fn)
for batch in loader:
print(batch.text)
Nested fields can be accessed by using dot notation.
The document itself can be accessed using the empty string as the key.
Transformations that operate on reference types (such as Documents) can optionally
not return a value.
The transformations will be applied according to their order in the dictionary.
EXAMPLE USAGE
.. code-block:: python
import torch
from torch.utils.data import DataLoader
from docarray import DocumentArray, BaseDocument
from docarray.data import MultiModalDataset
from docarray.documents import Text
class Thesis(BaseDocument):
title: Text
class Student(BaseDocument):
thesis: Thesis
def embed_title(title: Text):
title.embedding = torch.ones(4)
def normalize_embedding(thesis: Thesis):
thesis.title.embedding = thesis.title.embedding / thesis.title.embedding.norm()
def add_nonsense(student: Student):
student.thesis.title.embedding = student.thesis.title.embedding + int(
student.thesis.title.text
)
da = DocumentArray[Student](Student(thesis=Thesis(title=str(i))) for i in range(16))
ds = MultiModalDataset[Student](
da,
preprocessing={
"thesis.title": embed_title,
"thesis": normalize_embedding,
"": add_nonsense,
},
)
loader = DataLoader(ds, batch_size=4, collate_fn=ds.collate_fn)
for batch in loader:
print(batch.thesis.title.embedding)
"""
document_type: Optional[Type[BaseDocument]] = None
__typed_ds__: Dict[Type[BaseDocument], Type['MultiModalDataset']] = {}
def __init__(
self, da: 'DocumentArray[T_doc]', preprocessing: Dict[str, Callable]
) -> None:
self.da = da
self._preprocessing = preprocessing
def __len__(self):
return len(self.da)
def __getitem__(self, item: int):
doc = self.da[item].copy(deep=True)
for field, preprocess in self._preprocessing.items():
if len(field) == 0:
doc = preprocess(doc) or doc
else:
acc_path = field.split('.')
_field_ref = doc
for attr in acc_path[:-1]:
_field_ref = getattr(_field_ref, attr)
attr = acc_path[-1]
value = getattr(_field_ref, attr)
setattr(_field_ref, attr, preprocess(value) or value)
return doc
@classmethod
def collate_fn(cls, batch: List[T_doc]):
doc_type = cls.document_type
if doc_type:
batch_da = DocumentArray[doc_type]( # type: ignore
batch,
tensor_type=TorchTensor,
)
else:
batch_da = DocumentArray(batch, tensor_type=TorchTensor)
return batch_da.stack()
@classmethod
def __class_getitem__(cls, item: Type[BaseDocument]) -> Type['MultiModalDataset']:
if not issubclass(item, BaseDocument):
raise ValueError(
f'{cls.__name__}[item] item should be a Document not a {item} '
)
if item not in cls.__typed_ds__:
global _TypedDataset
class _TypedDataset(cls): # type: ignore
document_type = item
change_cls_name(
_TypedDataset, f'{cls.__name__}[{item.__name__}]', globals()
)
cls.__typed_ds__[item] = _TypedDataset
return cls.__typed_ds__[item]
|
import json
import os
import pickle
import tempfile
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
kRows = 100
kCols = 10
def generate_data():
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
return X, y
class TestPickling:
def run_model_pickling(self, xgb_params) -> str:
X, y = generate_data()
dtrain = xgb.DMatrix(X, y)
bst = xgb.train(xgb_params, dtrain)
dump_0 = bst.get_dump(dump_format='json')
assert dump_0
config_0 = bst.save_config()
filename = 'model.pkl'
with open(filename, 'wb') as fd:
pickle.dump(bst, fd)
with open(filename, 'rb') as fd:
bst = pickle.load(fd)
with open(filename, 'wb') as fd:
pickle.dump(bst, fd)
with open(filename, 'rb') as fd:
bst = pickle.load(fd)
assert bst.get_dump(dump_format='json') == dump_0
if os.path.exists(filename):
os.remove(filename)
config_1 = bst.save_config()
assert config_0 == config_1
return json.loads(config_0)
def test_model_pickling_json(self):
def check(config):
tree_param = config["learner"]["gradient_booster"]["tree_train_param"]
subsample = tree_param["subsample"]
assert float(subsample) == 0.5
params = {"nthread": 8, "tree_method": "hist", "subsample": 0.5}
config = self.run_model_pickling(params)
check(config)
params = {"nthread": 8, "tree_method": "exact", "subsample": 0.5}
config = self.run_model_pickling(params)
check(config)
@pytest.mark.skipif(**tm.no_sklearn())
def test_with_sklearn_obj_metric(self) -> None:
from sklearn.metrics import mean_squared_error
X, y = tm.datasets.make_regression()
reg = xgb.XGBRegressor(objective=tm.ls_obj, eval_metric=mean_squared_error)
reg.fit(X, y)
pkl = pickle.dumps(reg)
reg_1 = pickle.loads(pkl)
assert callable(reg_1.objective)
assert callable(reg_1.eval_metric)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, "model.json")
reg.save_model(path)
reg_2 = xgb.XGBRegressor()
reg_2.load_model(path)
assert not callable(reg_2.objective)
assert not callable(reg_2.eval_metric)
assert reg_2.eval_metric is None
|
import json
import os
import pickle
import numpy as np
import xgboost as xgb
kRows = 100
kCols = 10
def generate_data():
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
return X, y
class TestPickling:
def run_model_pickling(self, xgb_params) -> str:
X, y = generate_data()
dtrain = xgb.DMatrix(X, y)
bst = xgb.train(xgb_params, dtrain)
dump_0 = bst.get_dump(dump_format='json')
assert dump_0
config_0 = bst.save_config()
filename = 'model.pkl'
with open(filename, 'wb') as fd:
pickle.dump(bst, fd)
with open(filename, 'rb') as fd:
bst = pickle.load(fd)
with open(filename, 'wb') as fd:
pickle.dump(bst, fd)
with open(filename, 'rb') as fd:
bst = pickle.load(fd)
assert bst.get_dump(dump_format='json') == dump_0
if os.path.exists(filename):
os.remove(filename)
config_1 = bst.save_config()
assert config_0 == config_1
return json.loads(config_0)
def test_model_pickling_json(self):
def check(config):
tree_param = config["learner"]["gradient_booster"]["tree_train_param"]
subsample = tree_param["subsample"]
assert float(subsample) == 0.5
params = {"nthread": 8, "tree_method": "hist", "subsample": 0.5}
config = self.run_model_pickling(params)
check(config)
params = {"nthread": 8, "tree_method": "exact", "subsample": 0.5}
config = self.run_model_pickling(params)
check(config)
|
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json():
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocList[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json():
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocArray[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_CPU_VIDEO_DECODER,
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_gif,
decode_image,
decode_jpeg,
decode_png,
decode_webp,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_CPU_VIDEO_DECODER",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_gif,
decode_image,
decode_jpeg,
decode_png,
decode_webp,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
__version__ = '0.14.12'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.11'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import nn
from sentence_transformers.models.Module import Module
class LSTM(Module):
"""Bidirectional LSTM running over word embeddings."""
config_keys: list[str] = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
config_file_name: str = "lstm_config.json"
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
super().__init__()
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
# Saving LSTM models with Safetensors does not work unless the weights are on CPU
# See https://github.com/UKPLab/sentence-transformers/pull/2722
device = next(self.parameters()).device
self.cpu()
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
self.to(device)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json")) as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from huggingface_hub import HfApi
import diffusers
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
ALWAYS_TEST_PIPELINE_MODULES = [
"controlnet",
"controlnet_flux",
"controlnet_sd3",
"stable_diffusion",
"stable_diffusion_2",
"stable_diffusion_3",
"stable_diffusion_xl",
"ip_adapters",
"flux",
]
PIPELINE_USAGE_CUTOFF = int(os.getenv("PIPELINE_USAGE_CUTOFF", 50000))
logger = logging.getLogger(__name__)
api = HfApi()
def filter_pipelines(usage_dict, usage_cutoff=10000):
output = []
for diffusers_object, usage in usage_dict.items():
if usage < usage_cutoff:
continue
is_diffusers_pipeline = hasattr(diffusers.pipelines, diffusers_object)
if not is_diffusers_pipeline:
continue
output.append(diffusers_object)
return output
def fetch_pipeline_objects():
models = api.list_models(library="diffusers")
downloads = defaultdict(int)
for model in models:
is_counted = False
for tag in model.tags:
if tag.startswith("diffusers:"):
is_counted = True
downloads[tag[len("diffusers:") :]] += model.downloads
if not is_counted:
downloads["other"] += model.downloads
# Remove 0 downloads
downloads = {k: v for k, v in downloads.items() if v > 0}
pipeline_objects = filter_pipelines(downloads, PIPELINE_USAGE_CUTOFF)
return pipeline_objects
def fetch_pipeline_modules_to_test():
try:
pipeline_objects = fetch_pipeline_objects()
except Exception as e:
logger.error(e)
raise RuntimeError("Unable to fetch model list from HuggingFace Hub.")
test_modules = []
for pipeline_name in pipeline_objects:
module = getattr(diffusers, pipeline_name)
test_module = module.__module__.split(".")[-2].strip()
test_modules.append(test_module)
return test_modules
def main():
test_modules = fetch_pipeline_modules_to_test()
test_modules.extend(ALWAYS_TEST_PIPELINE_MODULES)
# Get unique modules
test_modules = sorted(set(test_modules))
print(json.dumps(test_modules))
save_path = f"{PATH_TO_REPO}/reports"
os.makedirs(save_path, exist_ok=True)
with open(f"{save_path}/test-pipelines.json", "w") as f:
json.dump({"pipeline_test_modules": test_modules}, f)
if __name__ == "__main__":
main()
|
import json
import logging
import os
from collections import defaultdict
from pathlib import Path
from huggingface_hub import HfApi
import diffusers
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
ALWAYS_TEST_PIPELINE_MODULES = [
"controlnet",
"stable_diffusion",
"stable_diffusion_2",
"stable_diffusion_xl",
"stable_diffusion_adapter",
"ip_adapters",
"kandinsky2_2",
]
PIPELINE_USAGE_CUTOFF = int(os.getenv("PIPELINE_USAGE_CUTOFF", 50000))
logger = logging.getLogger(__name__)
api = HfApi()
def filter_pipelines(usage_dict, usage_cutoff=10000):
output = []
for diffusers_object, usage in usage_dict.items():
if usage < usage_cutoff:
continue
is_diffusers_pipeline = hasattr(diffusers.pipelines, diffusers_object)
if not is_diffusers_pipeline:
continue
output.append(diffusers_object)
return output
def fetch_pipeline_objects():
models = api.list_models(library="diffusers")
downloads = defaultdict(int)
for model in models:
is_counted = False
for tag in model.tags:
if tag.startswith("diffusers:"):
is_counted = True
downloads[tag[len("diffusers:") :]] += model.downloads
if not is_counted:
downloads["other"] += model.downloads
# Remove 0 downloads
downloads = {k: v for k, v in downloads.items() if v > 0}
pipeline_objects = filter_pipelines(downloads, PIPELINE_USAGE_CUTOFF)
return pipeline_objects
def fetch_pipeline_modules_to_test():
try:
pipeline_objects = fetch_pipeline_objects()
except Exception as e:
logger.error(e)
raise RuntimeError("Unable to fetch model list from HuggingFace Hub.")
test_modules = []
for pipeline_name in pipeline_objects:
module = getattr(diffusers, pipeline_name)
test_module = module.__module__.split(".")[-2].strip()
test_modules.append(test_module)
return test_modules
def main():
test_modules = fetch_pipeline_modules_to_test()
test_modules.extend(ALWAYS_TEST_PIPELINE_MODULES)
# Get unique modules
test_modules = sorted(set(test_modules))
print(json.dumps(test_modules))
save_path = f"{PATH_TO_REPO}/reports"
os.makedirs(save_path, exist_ok=True)
with open(f"{save_path}/test-pipelines.json", "w") as f:
json.dump({"pipeline_test_modules": test_modules}, f)
if __name__ == "__main__":
main()
|
from typing import Optional
from llama_index.core.base.llms.types import ChatMessage
from typing_extensions import NotRequired, TypedDict
XINFERENCE_MODEL_SIZES = {
"baichuan": 2048,
"baichuan-chat": 2048,
"wizardlm-v1.0": 2048,
"vicuna-v1.3": 2048,
"orca": 2048,
"chatglm": 2048,
"chatglm2": 8192,
"llama-2-chat": 4096,
"llama-2": 4096,
}
class ChatCompletionMessage(TypedDict):
role: str
content: Optional[str]
user: NotRequired[str]
def xinference_message_to_history(message: ChatMessage) -> ChatCompletionMessage:
return ChatCompletionMessage(role=message.role, content=message.content)
def xinference_modelname_to_contextsize(modelname: str) -> int:
context_size = XINFERENCE_MODEL_SIZES.get(modelname)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(XINFERENCE_MODEL_SIZES.keys())
)
return context_size
|
from typing import Optional
from llama_index.core.base.llms.types import ChatMessage
from typing_extensions import NotRequired, TypedDict
XINFERENCE_MODEL_SIZES = {
"baichuan": 2048,
"baichuan-chat": 2048,
"wizardlm-v1.0": 2048,
"vicuna-v1.3": 2048,
"orca": 2048,
"chatglm": 2048,
"chatglm2": 8192,
"llama-2-chat": 4096,
"llama-2": 4096,
}
class ChatCompletionMessage(TypedDict):
role: str
content: Optional[str]
user: NotRequired[str]
def xinference_message_to_history(message: ChatMessage) -> ChatCompletionMessage:
return ChatCompletionMessage(role=message.role, content=message.content)
def xinference_modelname_to_contextsize(modelname: str) -> int:
context_size = XINFERENCE_MODEL_SIZES.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(XINFERENCE_MODEL_SIZES.keys())
)
return context_size
|
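A small, hedged usage sketch for the helpers above. The module name `xinference_utils` is an assumption made for illustration (save the file above under that name, or adjust the import accordingly).
# Hedged usage of the helpers defined above; the module name is an assumption.
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from xinference_utils import (
    xinference_message_to_history,
    xinference_modelname_to_contextsize,
)

print(xinference_modelname_to_contextsize("chatglm2"))  # 8192
print(xinference_message_to_history(ChatMessage(role=MessageRole.USER, content="Hi")))
# An unknown model name raises a ValueError that lists the known models.
|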
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg16 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.vgg16 import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg16 import VGG16
from keras.src.applications.vgg16 import decode_predictions
from keras.src.applications.vgg16 import preprocess_input
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS
from ...core import bbox_cxcywh_to_xyxy
@HEADS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
    Unlike traditional RPNHead, this module does not need FPN input, but just
    decodes `init_proposal_bboxes` and expands the first dimension of
`init_proposal_bboxes` and `init_proposal_features` to the batch_size.
Args:
num_proposals (int): Number of init_proposals. Default 100.
proposal_feature_channel (int): Channel number of
init_proposal_feature. Defaults to 256.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_proposals=100,
proposal_feature_channel=256,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(EmbeddingRPNHead, self).__init__(init_cfg)
self.num_proposals = num_proposals
self.proposal_feature_channel = proposal_feature_channel
self._init_layers()
def _init_layers(self):
"""Initialize a sparse set of proposal boxes and proposal features."""
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(
self.num_proposals, self.proposal_feature_channel)
def init_weights(self):
"""Initialize the init_proposal_bboxes as normalized.
[c_x, c_y, w, h], and we initialize it to the size of the entire
image.
"""
super(EmbeddingRPNHead, self).init_weights()
nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
def _decode_init_proposals(self, imgs, img_metas):
"""Decode init_proposal_bboxes according to the size of images and
expand dimension of init_proposal_features to batch_size.
Args:
imgs (list[Tensor]): List of FPN features.
img_metas (list[dict]): List of meta-information of
images. Need the img_shape to decode the init_proposals.
Returns:
Tuple(Tensor):
- proposals (Tensor): Decoded proposal bboxes,
has shape (batch_size, num_proposals, 4).
- init_proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel).
- imgs_whwh (Tensor): Tensor with shape
(batch_size, 4), the dimension means
[img_width, img_height, img_width, img_height].
"""
proposals = self.init_proposal_bboxes.weight.clone()
proposals = bbox_cxcywh_to_xyxy(proposals)
num_imgs = len(imgs[0])
imgs_whwh = []
for meta in img_metas:
h, w, _ = meta['img_shape']
imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
imgs_whwh = torch.cat(imgs_whwh, dim=0)
imgs_whwh = imgs_whwh[:, None, :]
# imgs_whwh has shape (batch_size, 1, 4)
# The shape of proposals change from (num_proposals, 4)
# to (batch_size ,num_proposals, 4)
proposals = proposals * imgs_whwh
init_proposal_features = self.init_proposal_features.weight.clone()
init_proposal_features = init_proposal_features[None].expand(
num_imgs, *init_proposal_features.size())
return proposals, init_proposal_features, imgs_whwh
def forward_dummy(self, img, img_metas):
"""Dummy forward function.
Used in flops calculation.
"""
return self._decode_init_proposals(img, img_metas)
def forward_train(self, img, img_metas):
"""Forward function in training stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test_rpn(self, img, img_metas):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test(self, img, img_metas):
"""Forward function in testing stage."""
raise NotImplementedError
def aug_test_rpn(self, feats, img_metas):
raise NotImplementedError(
'EmbeddingRPNHead does not support test-time augmentation')
|
import torch
import torch.nn as nn
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS
from ...core import bbox_cxcywh_to_xyxy
@HEADS.register_module()
class EmbeddingRPNHead(BaseModule):
"""RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
    Unlike traditional RPNHead, this module does not need FPN input, but just
    decodes `init_proposal_bboxes` and expands the first dimension of
`init_proposal_bboxes` and `init_proposal_features` to the batch_size.
Args:
num_proposals (int): Number of init_proposals. Default 100.
proposal_feature_channel (int): Channel number of
init_proposal_feature. Defaults to 256.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_proposals=100,
proposal_feature_channel=256,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(EmbeddingRPNHead, self).__init__(init_cfg)
self.num_proposals = num_proposals
self.proposal_feature_channel = proposal_feature_channel
self._init_layers()
def _init_layers(self):
"""Initialize a sparse set of proposal boxes and proposal features."""
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(
self.num_proposals, self.proposal_feature_channel)
def init_weights(self):
"""Initialize the init_proposal_bboxes as normalized.
[c_x, c_y, w, h], and we initialize it to the size of the entire
image.
"""
super(EmbeddingRPNHead, self).init_weights()
nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
def _decode_init_proposals(self, imgs, img_metas):
"""Decode init_proposal_bboxes according to the size of images and
expand dimension of init_proposal_features to batch_size.
Args:
imgs (list[Tensor]): List of FPN features.
img_metas (list[dict]): List of meta-information of
images. Need the img_shape to decode the init_proposals.
Returns:
Tuple(Tensor):
- proposals (Tensor): Decoded proposal bboxes,
has shape (batch_size, num_proposals, 4).
- init_proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel).
- imgs_whwh (Tensor): Tensor with shape
(batch_size, 4), the dimension means
[img_width, img_height, img_width, img_height].
"""
proposals = self.init_proposal_bboxes.weight.clone()
proposals = bbox_cxcywh_to_xyxy(proposals)
num_imgs = len(imgs[0])
imgs_whwh = []
for meta in img_metas:
h, w, _ = meta['img_shape']
imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
imgs_whwh = torch.cat(imgs_whwh, dim=0)
imgs_whwh = imgs_whwh[:, None, :]
# imgs_whwh has shape (batch_size, 1, 4)
# The shape of proposals change from (num_proposals, 4)
# to (batch_size ,num_proposals, 4)
proposals = proposals * imgs_whwh
init_proposal_features = self.init_proposal_features.weight.clone()
init_proposal_features = init_proposal_features[None].expand(
num_imgs, *init_proposal_features.size())
return proposals, init_proposal_features, imgs_whwh
def forward_dummy(self, img, img_metas):
"""Dummy forward function.
Used in flops calculation.
"""
return self._decode_init_proposals(img, img_metas)
def forward_train(self, img, img_metas):
"""Forward function in training stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test_rpn(self, img, img_metas):
"""Forward function in testing stage."""
return self._decode_init_proposals(img, img_metas)
def simple_test(self, img, img_metas):
"""Forward function in testing stage."""
raise NotImplementedError
def aug_test_rpn(self, feats, img_metas):
raise NotImplementedError(
'EmbeddingRPNHead does not support test-time augmentation')
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.18.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.17.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
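A brief sketch of how `parse_version_info` behaves, assuming the file above is importable (for example as `mmdet.version`; the exact import path is an assumption).
# Illustrative behaviour of parse_version_info; the import path is an assumption.
from mmdet.version import parse_version_info

print(parse_version_info('2.18.0'))     # (2, 18, 0)
print(parse_version_info('2.18.0rc1'))  # (2, 18, 0, 'rc1')
|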
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class RuntimeInfoHook(Hook):
"""A hook that updates runtime information into message hub.
E.g. ``epoch``, ``iter``, ``max_epochs``, and ``max_iters`` for the
training state. Components that cannot access the runner can get runtime
information through the message hub.
"""
priority = 'VERY_HIGH'
def before_run(self, runner) -> None:
"""Initialize runtime information."""
runner.message_hub.update_info('epoch', runner.epoch)
runner.message_hub.update_info('iter', runner.iter)
runner.message_hub.update_info('max_epochs', runner.max_epochs)
runner.message_hub.update_info('max_iters', runner.max_iters)
def before_train(self, runner) -> None:
"""Update resumed training state."""
runner.message_hub.update_info('epoch', runner.epoch)
runner.message_hub.update_info('iter', runner.iter)
def before_train_epoch(self, runner) -> None:
"""Update current epoch information before every epoch."""
runner.message_hub.update_info('epoch', runner.epoch)
def before_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None) -> None:
"""Update current iter and learning rate information before every
iteration."""
runner.message_hub.update_info('iter', runner.iter)
lr_dict = runner.optim_wrapper.get_lr()
assert isinstance(lr_dict, dict), (
'`runner.optim_wrapper.get_lr()` should return a dict '
            'of learning rates when training with OptimWrapper (single '
            'optimizer) or OptimWrapperDict (multiple optimizers), '
            f'but got {type(lr_dict)}. Please check that your optimizer '
            'constructor returns an `OptimWrapper` or `OptimWrapperDict` '
            'instance')
for name, lr in lr_dict.items():
runner.message_hub.update_scalar(f'train/{name}', lr[0])
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ``log_vars`` in model outputs every iteration."""
if outputs is not None:
for key, value in outputs.items():
runner.message_hub.update_scalar(f'train/{key}', value)
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each validation epoch.
Args:
runner (Runner): The runner of the validation process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on validation dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'val/{key}', value)
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each test epoch.
Args:
runner (Runner): The runner of the testing process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on test dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'test/{key}', value)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class RuntimeInfoHook(Hook):
"""A hook that updates runtime information into message hub.
E.g. ``epoch``, ``iter``, ``max_epochs``, and ``max_iters`` for the
training state. Components that cannot access the runner can get runtime
information through the message hub.
"""
priority = 'VERY_HIGH'
def before_run(self, runner) -> None:
"""Initialize runtime information."""
runner.message_hub.update_info('epoch', runner.epoch)
runner.message_hub.update_info('iter', runner.iter)
runner.message_hub.update_info('max_epochs', runner.max_epochs)
runner.message_hub.update_info('max_iters', runner.max_iters)
def before_train(self, runner) -> None:
"""Update resumed training state."""
runner.message_hub.update_info('epoch', runner.epoch)
runner.message_hub.update_info('iter', runner.iter)
def before_train_epoch(self, runner) -> None:
"""Update current epoch information before every epoch."""
runner.message_hub.update_info('epoch', runner.epoch)
def before_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None) -> None:
"""Update current iter and learning rate information before every
iteration."""
runner.message_hub.update_info('iter', runner.iter)
lr_dict = runner.optim_wrapper.get_lr()
assert isinstance(lr_dict, dict), (
'`runner.optim_wrapper.get_lr()` should return a dict '
            'of learning rates when training with OptimWrapper (single '
            'optimizer) or OptimWrapperDict (multiple optimizers), '
            f'but got {type(lr_dict)}. Please check that your optimizer '
            'constructor returns an `OptimWrapper` or `OptimWrapperDict` '
            'instance')
for name, lr in lr_dict.items():
runner.message_hub.update_scalar(f'train/{name}', lr[0])
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ``log_vars`` in model outputs every iteration."""
if outputs is not None:
for key, value in outputs['log_vars'].items():
runner.message_hub.update_scalar(f'train/{key}', value)
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each validation epoch.
Args:
runner (Runner): The runner of the validation process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on validation dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'val/{key}', value)
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each test epoch.
Args:
runner (Runner): The runner of the testing process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on test dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'test/{key}', value)
|
"""Tests for the Google Cloud DocAI parser."""
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.document_loaders.parsers import (
AzureAIDocumentIntelligenceParser,
)
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
@patch("azure.core.credentials.AzureKeyCredential")
def test_doc_intelligence(mock_credential: MagicMock, mock_client: MagicMock) -> None:
endpoint = "endpoint"
key = "key"
parser = AzureAIDocumentIntelligenceParser(api_endpoint=endpoint, api_key=key)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(
endpoint=endpoint,
credential=mock_credential(),
headers={
"x-ms-useragent": "langchain-parser/1.0.0",
},
)
assert parser.client == mock_client()
assert parser.api_model == "prebuilt-layout"
assert parser.mode == "markdown"
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
@patch("azure.core.credentials.AzureKeyCredential")
def test_doc_intelligence_with_analysis_features(
mock_credential: MagicMock, mock_client: MagicMock
) -> None:
endpoint = "endpoint"
key = "key"
analysis_features = ["ocrHighResolution", "barcodes"]
parser = AzureAIDocumentIntelligenceParser(
api_endpoint=endpoint, api_key=key, analysis_features=analysis_features
)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(
endpoint=endpoint,
credential=mock_credential(),
headers={
"x-ms-useragent": "langchain-parser/1.0.0",
},
)
assert parser.client == mock_client()
assert parser.api_model == "prebuilt-layout"
assert parser.mode == "markdown"
with pytest.raises(ValueError):
analysis_features = ["invalid"]
parser = AzureAIDocumentIntelligenceParser(
api_endpoint=endpoint, api_key=key, analysis_features=analysis_features
)
|
"""Tests for the Google Cloud DocAI parser."""
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.document_loaders.parsers import (
AzureAIDocumentIntelligenceParser,
)
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
@patch("azure.core.credentials.AzureKeyCredential")
def test_doc_intelligence(mock_credential: MagicMock, mock_client: MagicMock) -> None:
endpoint = "endpoint"
key = "key"
parser = AzureAIDocumentIntelligenceParser(api_endpoint=endpoint, api_key=key)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(
endpoint=endpoint,
credential=mock_credential(),
headers={
"x-ms-useragent": "langchain-parser/1.0.0",
},
features=None,
)
assert parser.client == mock_client()
assert parser.api_model == "prebuilt-layout"
assert parser.mode == "markdown"
@pytest.mark.requires("azure", "azure.ai", "azure.ai.documentintelligence")
@patch("azure.ai.documentintelligence.DocumentIntelligenceClient")
@patch("azure.core.credentials.AzureKeyCredential")
def test_doc_intelligence_with_analysis_features(
mock_credential: MagicMock, mock_client: MagicMock
) -> None:
endpoint = "endpoint"
key = "key"
analysis_features = ["ocrHighResolution", "barcodes"]
parser = AzureAIDocumentIntelligenceParser(
api_endpoint=endpoint, api_key=key, analysis_features=analysis_features
)
mock_credential.assert_called_once_with(key)
mock_client.assert_called_once_with(
endpoint=endpoint,
credential=mock_credential(),
headers={
"x-ms-useragent": "langchain-parser/1.0.0",
},
features=analysis_features,
)
assert parser.client == mock_client()
assert parser.api_model == "prebuilt-layout"
assert parser.mode == "markdown"
with pytest.raises(ValueError):
analysis_features = ["invalid"]
parser = AzureAIDocumentIntelligenceParser(
api_endpoint=endpoint, api_key=key, analysis_features=analysis_features
)
|
"""
Tests for sklearn.cluster._feature_agglomeration
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.cluster import FeatureAgglomeration
from sklearn.datasets import make_blobs
from sklearn.utils._testing import assert_array_almost_equal
def test_feature_agglomeration():
n_clusters = 1
X = np.array([0, 0, 1]).reshape(1, 3) # (n_samples, n_features)
agglo_mean = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.mean)
agglo_median = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.median)
agglo_mean.fit(X)
agglo_median.fit(X)
assert np.size(np.unique(agglo_mean.labels_)) == n_clusters
assert np.size(np.unique(agglo_median.labels_)) == n_clusters
assert np.size(agglo_mean.labels_) == X.shape[1]
assert np.size(agglo_median.labels_) == X.shape[1]
# Test transform
Xt_mean = agglo_mean.transform(X)
Xt_median = agglo_median.transform(X)
assert Xt_mean.shape[1] == n_clusters
assert Xt_median.shape[1] == n_clusters
assert Xt_mean == np.array([1 / 3.0])
assert Xt_median == np.array([0.0])
# Test inverse transform
X_full_mean = agglo_mean.inverse_transform(Xt_mean)
X_full_median = agglo_median.inverse_transform(Xt_median)
assert np.unique(X_full_mean[0]).size == n_clusters
assert np.unique(X_full_median[0]).size == n_clusters
assert_array_almost_equal(agglo_mean.transform(X_full_mean), Xt_mean)
assert_array_almost_equal(agglo_median.transform(X_full_median), Xt_median)
def test_feature_agglomeration_feature_names_out():
"""Check `get_feature_names_out` for `FeatureAgglomeration`."""
X, _ = make_blobs(n_features=6, random_state=0)
agglo = FeatureAgglomeration(n_clusters=3)
agglo.fit(X)
n_clusters = agglo.n_clusters_
names_out = agglo.get_feature_names_out()
assert_array_equal(
[f"featureagglomeration{i}" for i in range(n_clusters)], names_out
)
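# Hedged illustration (not an sklearn test): with a single cluster, FeatureAgglomeration
# pools all features with `pooling_func`, which is why the assertions above expect
# mean([0, 0, 1]) == 1/3 and median([0, 0, 1]) == 0. The helper below is hypothetical,
# added only for illustration, and is not collected by pytest.
def _feature_agglomeration_demo():
    X_demo = np.array([[0.0, 0.0, 1.0]])
    agglo_demo = FeatureAgglomeration(n_clusters=1, pooling_func=np.mean)
    Xt_demo = agglo_demo.fit_transform(X_demo)  # -> array([[0.333...]])
    # inverse_transform broadcasts the pooled value back to every original feature
    return agglo_demo.inverse_transform(Xt_demo)  # -> array([[0.333..., 0.333..., 0.333...]])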
|
"""
Tests for sklearn.cluster._feature_agglomeration
"""
import warnings
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.cluster import FeatureAgglomeration
from sklearn.datasets import make_blobs
from sklearn.utils._testing import assert_array_almost_equal
def test_feature_agglomeration():
n_clusters = 1
X = np.array([0, 0, 1]).reshape(1, 3) # (n_samples, n_features)
agglo_mean = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.mean)
agglo_median = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.median)
agglo_mean.fit(X)
agglo_median.fit(X)
assert np.size(np.unique(agglo_mean.labels_)) == n_clusters
assert np.size(np.unique(agglo_median.labels_)) == n_clusters
assert np.size(agglo_mean.labels_) == X.shape[1]
assert np.size(agglo_median.labels_) == X.shape[1]
# Test transform
Xt_mean = agglo_mean.transform(X)
Xt_median = agglo_median.transform(X)
assert Xt_mean.shape[1] == n_clusters
assert Xt_median.shape[1] == n_clusters
assert Xt_mean == np.array([1 / 3.0])
assert Xt_median == np.array([0.0])
# Test inverse transform
X_full_mean = agglo_mean.inverse_transform(Xt_mean)
X_full_median = agglo_median.inverse_transform(Xt_median)
assert np.unique(X_full_mean[0]).size == n_clusters
assert np.unique(X_full_median[0]).size == n_clusters
assert_array_almost_equal(agglo_mean.transform(X_full_mean), Xt_mean)
assert_array_almost_equal(agglo_median.transform(X_full_median), Xt_median)
def test_feature_agglomeration_feature_names_out():
"""Check `get_feature_names_out` for `FeatureAgglomeration`."""
X, _ = make_blobs(n_features=6, random_state=0)
agglo = FeatureAgglomeration(n_clusters=3)
agglo.fit(X)
n_clusters = agglo.n_clusters_
names_out = agglo.get_feature_names_out()
assert_array_equal(
[f"featureagglomeration{i}" for i in range(n_clusters)], names_out
)
# TODO(1.7): remove this test
def test_inverse_transform_Xt_deprecation():
X = np.array([0, 0, 1]).reshape(1, 3) # (n_samples, n_features)
est = FeatureAgglomeration(n_clusters=1, pooling_func=np.mean)
est.fit(X)
X = est.transform(X)
with pytest.raises(TypeError, match="Missing required positional argument"):
est.inverse_transform()
with pytest.raises(TypeError, match="Cannot use both X and Xt. Use X only."):
est.inverse_transform(X=X, Xt=X)
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
est.inverse_transform(X)
with pytest.warns(FutureWarning, match="Xt was renamed X in version 1.5"):
est.inverse_transform(Xt=X)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This test checks the docstrings of all of our public API. It does so
by checking the `__all__` of each of our namespaces.
To add a new namespace you need to
* import it
* add it to the `SUB_MODULE_TO_CHECK` list
"""
import pytest
from mktestdocs import check_docstring, get_codeblock_members
import docarray.data
import docarray.documents
import docarray.index
import docarray.store
import docarray.typing
from docarray.utils import filter, find, map
SUB_MODULE_TO_CHECK = [
docarray,
docarray.index,
docarray.data,
docarray.documents,
docarray.store,
docarray.typing,
find,
map,
filter,
]
def get_obj_to_check(lib):
obj_to_check = []
all_test = getattr(lib, '__all__')
try:
all_test = getattr(lib, '__all_test__')
except (AttributeError, ImportError):
pass
for obj in all_test:
obj_to_check.append(getattr(lib, obj))
return obj_to_check
obj_to_check = []
for lib in SUB_MODULE_TO_CHECK:
obj_to_check.extend(get_obj_to_check(lib))
members = []
for obj in obj_to_check:
members.extend(get_codeblock_members(obj))
@pytest.mark.parametrize("obj", members, ids=lambda d: d.__qualname__)
def test_member(obj):
check_docstring(obj)
|
"""
This test checks the docstrings of all of our public API. It does so
by checking the `__all__` of each of our namespaces.
To add a new namespace you need to
* import it
* add it to the `SUB_MODULE_TO_CHECK` list
"""
import pytest
from mktestdocs import check_docstring, get_codeblock_members
import docarray.data
import docarray.documents
import docarray.index
import docarray.store
import docarray.typing
from docarray.utils import filter, find, map
SUB_MODULE_TO_CHECK = [
docarray,
docarray.index,
docarray.data,
docarray.documents,
docarray.store,
docarray.typing,
find,
map,
filter,
]
def get_obj_to_check(lib):
obj_to_check = []
all_test = getattr(lib, '__all__')
try:
all_test = getattr(lib, '__all_test__')
except (AttributeError, ImportError):
pass
for obj in all_test:
obj_to_check.append(getattr(lib, obj))
return obj_to_check
obj_to_check = []
for lib in SUB_MODULE_TO_CHECK:
obj_to_check.extend(get_obj_to_check(lib))
members = []
for obj in obj_to_check:
members.extend(get_codeblock_members(obj))
@pytest.mark.parametrize("obj", members, ids=lambda d: d.__qualname__)
def test_member(obj):
check_docstring(obj)
|
try:
from docarray import BaseDoc as Document
from docarray import DocList as DocumentArray
docarray_v2 = True
from jina._docarray_legacy import LegacyDocumentJina
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
import pydantic
is_pydantic_v2 = pydantic.__version__.startswith('2.')
|
try:
from docarray import BaseDoc as Document
from docarray import DocList as DocumentArray
docarray_v2 = True
except ImportError:
from docarray import Document, DocumentArray
docarray_v2 = False
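# Hedged usage sketch: downstream code typically branches on the `docarray_v2` flag set
# above so the same call site works with both docarray generations. The helper name and
# its use of empty documents are assumptions for illustration only.
def _make_empty_docs(n: int):
    if docarray_v2:
        # v2 exposes BaseDoc/DocList, aliased above to Document/DocumentArray
        return DocumentArray[Document]([Document() for _ in range(n)])
    # v1 DocumentArray ships an `empty` constructor
    return DocumentArray.empty(n)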
|
from llama_index.core import Document, MockEmbedding
from llama_index.core.llms import MockLLM
from llama_index.packs.raptor.base import RaptorRetriever
def test_raptor() -> None:
retriever = RaptorRetriever(
[
Document(text="one"),
Document(text="two"),
Document(text="three"),
Document(text="four"),
Document(text="five"),
Document(text="six"),
Document(text="seven"),
Document(text="eight"),
Document(text="nine"),
Document(text="ten"),
],
embed_model=MockEmbedding(embed_dim=1536),
llm=MockLLM(),
)
assert len(retriever.index.docstore.docs) == 13
nodes = retriever.retrieve("test", mode="collapsed")
assert len(nodes) == 2
nodes = retriever.retrieve("text", mode="tree_traversal")
assert len(nodes) == 5
|
from llama_index.core import Document, MockEmbedding, global_tokenizer
from llama_index.core.llms import MockLLM
from llama_index.packs.raptor.base import RaptorRetriever
import pytest
@pytest.mark.skipif(
condition=(global_tokenizer is None), reason="No global tokenizer set"
)
def test_raptor() -> None:
retriever = RaptorRetriever(
[
Document(text="one"),
Document(text="two"),
Document(text="three"),
Document(text="four"),
Document(text="five"),
Document(text="six"),
Document(text="seven"),
Document(text="eight"),
Document(text="nine"),
Document(text="ten"),
],
embed_model=MockEmbedding(embed_dim=1536),
llm=MockLLM(),
)
assert len(retriever.index.docstore.docs) == 13
nodes = retriever.retrieve("test", mode="collapsed")
assert len(nodes) == 2
nodes = retriever.retrieve("text", mode="tree_traversal")
assert len(nodes) == 5
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataSample
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
batch_idx: int,
data_batch: Optional[Sequence[Tuple[Any, BaseDataSample]]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the test loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataSample], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
inputs, data_samples = data_batch # type: ignore
inputs = tensor2imgs(inputs,
**data_samples[0].get('img_norm_cfg', dict()))
for input, data_sample, output in zip(
inputs,
data_samples, # type: ignore
outputs): # type: ignore
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.writer.add_image(name, origin_image, data_sample,
output, self.draw_gt, self.draw_pred)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Any, Optional, Sequence, Tuple
import cv2
import numpy as np
from mmengine.data import BaseDataSample
from mmengine.hooks import Hook
from mmengine.registry import HOOKS
from mmengine.utils.misc import tensor2imgs
@HOOKS.register_module()
class NaiveVisualizationHook(Hook):
"""Show or Write the predicted results during the process of testing.
Args:
interval (int): Visualization interval. Default: 1.
draw_gt (bool): Whether to draw the ground truth. Default to True.
draw_pred (bool): Whether to draw the predicted result.
Default to True.
"""
priority = 'NORMAL'
def __init__(self,
interval: int = 1,
draw_gt: bool = True,
draw_pred: bool = True):
self.draw_gt = draw_gt
self.draw_pred = draw_pred
self._interval = interval
def _unpad(self, input: np.ndarray, unpad_shape: Tuple[int,
int]) -> np.ndarray:
unpad_width, unpad_height = unpad_shape
unpad_image = input[:unpad_height, :unpad_width]
return unpad_image
def after_test_iter(
self,
runner,
data_batch: Optional[Sequence[Tuple[Any, BaseDataSample]]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Show or Write the predicted results.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataSample], optional): Outputs from model.
Defaults to None.
"""
if self.every_n_iters(runner, self._interval):
inputs, data_samples = data_batch # type: ignore
inputs = tensor2imgs(inputs,
**data_samples[0].get('img_norm_cfg', dict()))
for input, data_sample, output in zip(
inputs,
data_samples, # type: ignore
outputs): # type: ignore
# TODO We will implement a function to revert the augmentation
# in the future.
ori_shape = (data_sample.ori_width, data_sample.ori_height)
if 'pad_shape' in data_sample:
input = self._unpad(input,
data_sample.get('scale', ori_shape))
origin_image = cv2.resize(input, ori_shape)
name = osp.basename(data_sample.img_path)
runner.writer.add_image(name, origin_image, data_sample,
output, self.draw_gt, self.draw_pred)
|
import requests as req
from docarray import DocumentArray
from prometheus_client import Summary
from jina import Executor, Flow, monitor, requests
def test_prometheus_interface(port_generator):
class DummyExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.summary = Summary(
'a', 'A', registry=self.runtime_args.metrics_registry
)
@requests(on='/foo')
def foo(self, docs, **kwargs):
with self.summary.time():
...
port = port_generator()
with Flow(monitoring=True, port_monitoring=port_generator()).add(
uses=DummyExecutor, monitoring=True, port_monitoring=port
) as f:
f.post('/foo', inputs=DocumentArray.empty(4))
resp = req.get(f'http://localhost:{port}/')
    assert f'a_count 1.0' in str(  # foo was called once for the whole request, so the count is 1
resp.content
)
def test_decorator_interface(port_generator):
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
self._process(docs)
self.process_2(docs)
@monitor(name='metrics_name', documentation='metrics description')
def _process(self, docs): ...
@monitor()
def process_2(self, docs): ...
port = port_generator()
with Flow(monitoring=True, port_monitoring=port_generator()).add(
uses=DummyExecutor, monitoring=True, port_monitoring=port
) as f:
f.post('/foo', inputs=DocumentArray.empty(4))
resp = req.get(f'http://localhost:{port}/')
assert f'jina_metrics_name_count{{runtime_name="executor0/rep-0"}} 1.0' in str(
resp.content
)
assert (
f'jina_process_2_seconds_count{{runtime_name="executor0/rep-0"}} 1.0'
in str(resp.content)
)
def test_context_manager_interface(port_generator):
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
with self.monitor(
name='process_seconds', documentation='process time in seconds '
):
self._process(docs)
with self.monitor(
name='process_2_seconds', documentation='process 2 time in seconds '
):
self.process_2(docs)
def _process(self, docs): ...
def process_2(self, docs): ...
port = port_generator()
with Flow(monitoring=True, port_monitoring=port_generator()).add(
uses=DummyExecutor, monitoring=True, port_monitoring=port
) as f:
f.post('/foo', inputs=DocumentArray.empty(4))
resp = req.get(f'http://localhost:{port}/')
assert (
f'jina_process_seconds_count{{runtime_name="executor0/rep-0"}} 1.0'
in str(resp.content)
)
assert (
f'jina_process_2_seconds_count{{runtime_name="executor0/rep-0"}} 1.0'
in str(resp.content)
)
|
import requests as req
from docarray import DocumentArray
from prometheus_client import Summary
from jina import Executor, Flow, monitor, requests
def test_prometheus_interface(port_generator):
class DummyExecutor(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.summary = Summary(
'a', 'A', registry=self.runtime_args.metrics_registry
)
@requests(on='/foo')
def foo(self, docs, **kwargs):
with self.summary.time():
...
port = port_generator()
with Flow(monitoring=True, port_monitoring=port_generator()).add(
uses=DummyExecutor, monitoring=True, port_monitoring=port
) as f:
f.post('/foo', inputs=DocumentArray.empty(4))
resp = req.get(f'http://localhost:{port}/')
    assert f'a_count 1.0' in str(  # foo was called once for the whole request, so the count is 1
resp.content
)
def test_decorator_interface(port_generator):
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
self._process(docs)
self.process_2(docs)
@monitor(name='metrics_name', documentation='metrics description')
def _process(self, docs):
...
@monitor()
def process_2(self, docs):
...
port = port_generator()
with Flow(monitoring=True, port_monitoring=port_generator()).add(
uses=DummyExecutor, monitoring=True, port_monitoring=port
) as f:
f.post('/foo', inputs=DocumentArray.empty(4))
resp = req.get(f'http://localhost:{port}/')
assert f'jina_metrics_name_count{{runtime_name="executor0/rep-0"}} 1.0' in str(
resp.content
)
assert (
f'jina_process_2_seconds_count{{runtime_name="executor0/rep-0"}} 1.0'
in str(resp.content)
)
def test_context_manager_interface(port_generator):
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
with self.monitor(
name='process_seconds', documentation='process time in seconds '
):
self._process(docs)
with self.monitor(
name='process_2_seconds', documentation='process 2 time in seconds '
):
self.process_2(docs)
def _process(self, docs):
...
def process_2(self, docs):
...
port = port_generator()
with Flow(monitoring=True, port_monitoring=port_generator()).add(
uses=DummyExecutor, monitoring=True, port_monitoring=port
) as f:
f.post('/foo', inputs=DocumentArray.empty(4))
resp = req.get(f'http://localhost:{port}/')
assert (
f'jina_process_seconds_count{{runtime_name="executor0/rep-0"}} 1.0'
in str(resp.content)
)
assert (
f'jina_process_2_seconds_count{{runtime_name="executor0/rep-0"}} 1.0'
in str(resp.content)
)
|
import asyncio
from typing import TYPE_CHECKING, Optional, Tuple
import grpc
from jina.clients.base.retry import wait_or_raise_err
from jina.clients.helper import callback_exec
from jina.excepts import InternalNetworkError
from jina.proto import jina_pb2_grpc
from jina.serve.stream import RequestStreamer
if TYPE_CHECKING:
from jina.types.request import Request
class UnaryRpc:
"""Class that encapsulated the methods required to run unary rpc calls from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
continue_on_error,
metadata,
on_always,
on_done,
on_error,
p_bar,
req_iter,
max_attempts,
backoff_multiplier,
initial_backoff,
max_backoff,
logger,
show_progress,
compression,
client_args,
prefetch,
results_in_order,
**kwargs
):
self.results_in_order = results_in_order
self.prefetch = prefetch
self.client_args = client_args
self.compression = compression
self.show_progress = show_progress
self.logger = logger
self.max_backoff = max_backoff
self.initial_backoff = initial_backoff
self.backoff_multiplier = backoff_multiplier
self.max_attempts = max_attempts
self.req_iter = req_iter
self.p_bar = p_bar
self.on_error = on_error
self.on_done = on_done
self.on_always = on_always
self.metadata = metadata
self.continue_on_error = continue_on_error
self.channel = channel
self.kwargs = kwargs
async def unary_rpc_with_retry(self):
"""Wraps the unary rpc call with retry loop based on the retry params.
:yields: Responses received from the target.
"""
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(self.channel)
def _request_handler(
request: 'Request',
) -> 'Tuple[asyncio.Future, Optional[asyncio.Future]]':
async def _with_retry(req: 'Request'):
for attempt in range(1, self.max_attempts + 1):
try:
return await stub.process_single_data(
req,
compression=self.compression,
metadata=self.metadata,
credentials=self.kwargs.get('credentials', None),
timeout=self.kwargs.get('timeout', None),
)
except (
grpc.aio.AioRpcError,
InternalNetworkError,
) as err:
await wait_or_raise_err(
attempt=attempt,
err=err,
max_attempts=self.max_attempts,
backoff_multiplier=self.backoff_multiplier,
initial_backoff=self.initial_backoff,
max_backoff=self.max_backoff,
)
return (
asyncio.ensure_future(_with_retry(request)),
None,
)
def _result_handler(resp):
callback_exec(
response=resp,
logger=self.logger,
on_error=self.on_error,
on_done=self.on_done,
on_always=self.on_always,
continue_on_error=self.continue_on_error,
)
return resp
streamer_args = vars(self.client_args)
if self.prefetch:
streamer_args['prefetch'] = self.prefetch
streamer = RequestStreamer(
request_handler=_request_handler,
result_handler=_result_handler,
iterate_sync_in_thread=False,
logger=self.logger,
**streamer_args,
)
async for response in streamer.stream(
request_iterator=self.req_iter, results_in_order=self.results_in_order
):
if self.show_progress:
self.p_bar.update()
yield response
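# Hedged sketch (not jina's implementation): the retry loop above delegates the waiting
# policy to `wait_or_raise_err`. A minimal exponential-backoff helper taking the same
# parameters could look like the following; the name and exact behaviour are assumptions
# added purely for illustration.
async def _sketch_wait_or_raise(
    attempt, err, max_attempts, backoff_multiplier, initial_backoff, max_backoff
):
    if attempt >= max_attempts:
        raise err  # retries exhausted, surface the last error to the caller
    # grow the delay geometrically with each attempt and cap it at max_backoff
    delay = min(initial_backoff * (backoff_multiplier ** (attempt - 1)), max_backoff)
    await asyncio.sleep(delay)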
|
import asyncio
from typing import TYPE_CHECKING, Optional, Tuple
import grpc
from jina.clients.base.retry import wait_or_raise_err
from jina.clients.helper import callback_exec
from jina.excepts import InternalNetworkError
from jina.proto import jina_pb2_grpc
from jina.serve.stream import RequestStreamer
if TYPE_CHECKING:
from jina.types.request import Request
class UnaryRpc:
"""Class that encapsulated the methods required to run unary rpc calls from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
continue_on_error,
metadata,
on_always,
on_done,
on_error,
p_bar,
req_iter,
max_attempts,
backoff_multiplier,
initial_backoff,
max_backoff,
logger,
show_progress,
compression,
client_args,
prefetch,
results_in_order,
**kwargs
):
self.results_in_order = results_in_order
self.prefetch = prefetch
self.client_args = client_args
self.compression = compression
self.show_progress = show_progress
self.logger = logger
self.max_backoff = max_backoff
self.initial_backoff = initial_backoff
self.backoff_multiplier = backoff_multiplier
self.max_attempts = max_attempts
self.req_iter = req_iter
self.p_bar = p_bar
self.on_error = on_error
self.on_done = on_done
self.on_always = on_always
self.metadata = metadata
self.continue_on_error = continue_on_error
self.channel = channel
self.kwargs = kwargs
async def unary_rpc_with_retry(self):
"""Wraps the unary rpc call with retry loop based on the retry params.
:yields: Responses received from the target.
"""
stub = jina_pb2_grpc.JinaSingleDataRequestRPCStub(self.channel)
def _request_handler(
request: 'Request',
) -> 'Tuple[asyncio.Future, Optional[asyncio.Future]]':
async def _with_retry(req: 'Request'):
for attempt in range(1, self.max_attempts + 1):
try:
return await stub.process_single_data(
req,
compression=self.compression,
metadata=self.metadata,
credentials=self.kwargs.get('credentials', None),
timeout=self.kwargs.get('timeout', None),
)
except (
grpc.aio.AioRpcError,
InternalNetworkError,
) as err:
await wait_or_raise_err(
attempt=attempt,
err=err,
max_attempts=self.max_attempts,
backoff_multiplier=self.backoff_multiplier,
initial_backoff=self.initial_backoff,
max_backoff=self.max_backoff,
)
return (
asyncio.ensure_future(_with_retry(request)),
None,
)
def _result_handler(resp):
callback_exec(
response=resp,
on_error=self.on_error,
on_done=self.on_done,
on_always=self.on_always,
continue_on_error=self.continue_on_error,
logger=self.logger,
)
return resp
streamer_args = vars(self.client_args)
if self.prefetch:
streamer_args['prefetch'] = self.prefetch
streamer = RequestStreamer(
request_handler=_request_handler,
result_handler=_result_handler,
iterate_sync_in_thread=False,
logger=self.logger,
**streamer_args,
)
async for response in streamer.stream(
request_iterator=self.req_iter, results_in_order=self.results_in_order
):
if self.show_progress:
self.p_bar.update()
yield response
|
"""Toolkit for interacting with a vector store."""
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field
class VectorStoreInfo(BaseModel):
"""Information about a VectorStore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a Vector Store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> list[BaseTool]:
"""Get the tools in the toolkit."""
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
except ImportError as e:
msg = "You need to install langchain-community to use this toolkit."
raise ImportError(msg) from e
description = VectorStoreQATool.get_description(
self.vectorstore_info.name,
self.vectorstore_info.description,
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name,
self.vectorstore_info.description,
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between Vector Stores."""
vectorstores: list[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> list[BaseTool]:
"""Get the tools in the toolkit."""
tools: list[BaseTool] = []
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
)
except ImportError as e:
msg = "You need to install langchain-community to use this toolkit."
raise ImportError(msg) from e
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name,
vectorstore_info.description,
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
return tools
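# Hedged usage sketch: wiring a toolkit together. `my_vectorstore` and `my_llm` are
# placeholders for any VectorStore / BaseLanguageModel instances you already have; they
# are assumptions, not objects provided by this module.
def _build_toolkit_example(my_vectorstore: VectorStore, my_llm: BaseLanguageModel) -> list[BaseTool]:
    info = VectorStoreInfo(
        vectorstore=my_vectorstore,
        name="acme_docs",
        description="Internal documentation for the hypothetical Acme project",
    )
    toolkit = VectorStoreToolkit(vectorstore_info=info, llm=my_llm)
    # returns a QA tool and a QA-with-sources tool built on langchain_community
    return toolkit.get_tools()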
|
"""Toolkit for interacting with a vector store."""
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.tools.base import BaseToolkit
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field
class VectorStoreInfo(BaseModel):
"""Information about a VectorStore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a Vector Store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> list[BaseTool]:
"""Get the tools in the toolkit."""
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
except ImportError:
msg = "You need to install langchain-community to use this toolkit."
raise ImportError(msg)
description = VectorStoreQATool.get_description(
self.vectorstore_info.name,
self.vectorstore_info.description,
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name,
self.vectorstore_info.description,
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between Vector Stores."""
vectorstores: list[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def get_tools(self) -> list[BaseTool]:
"""Get the tools in the toolkit."""
tools: list[BaseTool] = []
try:
from langchain_community.tools.vectorstore.tool import (
VectorStoreQATool,
)
except ImportError:
msg = "You need to install langchain-community to use this toolkit."
raise ImportError(msg)
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name,
vectorstore_info.description,
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
return tools
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import AzureCosmosDBVectorSearch
from langchain_community.vectorstores.azure_cosmos_db import CosmosDBSimilarityType
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CosmosDBSimilarityType": "langchain_community.vectorstores.azure_cosmos_db",
"AzureCosmosDBVectorSearch": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AzureCosmosDBVectorSearch",
"CosmosDBSimilarityType",
]
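# Hedged sketch (illustration only): `create_importer` consolidates the PEP 562
# module-level `__getattr__` pattern. The standalone helper below is a generic
# approximation of that idea, not langchain's actual implementation.
def _sketch_deprecated_getattr(name: str) -> Any:
    import importlib
    import warnings
    if name in DEPRECATED_LOOKUP:
        target = DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {target} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(target), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")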
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import AzureCosmosDBVectorSearch
from langchain_community.vectorstores.azure_cosmos_db import CosmosDBSimilarityType
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CosmosDBSimilarityType": "langchain_community.vectorstores.azure_cosmos_db",
"AzureCosmosDBVectorSearch": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CosmosDBSimilarityType",
"AzureCosmosDBVectorSearch",
]
|
from typing import Optional, TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
---
```python
from docarray import BaseDoc
from docarray.typing import TextUrl
class MyDoc(BaseDoc):
remote_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
)
remote_txt = doc.remote_url.load()
```
---
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import TEXT_FILE_FORMATS
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
):
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_valid_text_extension = ext in TEXT_FILE_FORMATS or ext == ''
if not has_valid_text_extension:
raise ValueError('Text URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
---
```python
from docarray import BaseDoc
from docarray.typing import TextUrl
class MyDoc(BaseDoc):
remote_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
)
remote_txt = doc.remote_url.load()
```
---
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        The margin is an important hyperparameter and needs to be tuned for the task.
:param model: SentenceTransformerModel
:param distance_metric: Function to compute distance between two embeddings. The class TripletDistanceMetric
            contains common distance metrics that can be used.
:param triplet_margin: The negative should be at least this much further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
InputExample(texts=['Anchor 1', 'Positive 1', 'Negative 1']),
InputExample(texts=['Anchor 2', 'Positive 2', 'Negative 2']),
]
train_batch_size = 1
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""
The metric for the triplet loss
"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        The margin is an important hyperparameter and needs to be tuned for the task.
:param model: SentenceTransformerModel
:param distance_metric: Function to compute distance between two embeddings. The class TripletDistanceMetric
            contains common distance metrics that can be used.
:param triplet_margin: The negative should be at least this much further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [
InputExample(texts=['Anchor 1', 'Positive 1', 'Negative 1']),
InputExample(texts=['Anchor 2', 'Positive 2', 'Negative 2']),
]
train_batch_size = 1
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.TripletLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import DetVisualizationHook, TrackVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',
'PipelineSwitchHook', 'TrackVisualizationHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import DetVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',
'PipelineSwitchHook'
]
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
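# Hedged note (illustration only): mmdetection configs compose via `_base_`, so loading
# this file merges the dict above into the Faster R-CNN base config and overrides only
# the backbone. One way to inspect the merged result, assuming mmengine is installed and
# the placeholder path is replaced, is:
#   from mmengine.config import Config
#   cfg = Config.fromfile('path/to/this_config.py')  # placeholder path
#   print(cfg.model.backbone.type)  # -> 'ResNeXt'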
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
# model settings
model = dict(
type='FastRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='FastRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
else:
import transforms
return transforms, None, None
class SegmentationPresetTrain:
def __init__(
self,
*,
base_size,
crop_size,
hflip_prob=0.5,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
backend="pil",
use_v2=False,
):
T, datapoints, v2_extras = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]
if hflip_prob > 0:
transforms += [T.RandomHorizontalFlip(hflip_prob)]
if use_v2:
# We need a custom pad transform here, since the padding we want to perform here is fundamentally
# different from the padding in `RandomCrop` if `pad_if_needed=True`.
transforms += [v2_extras.PadIfSmaller(crop_size, fill={datapoints.Mask: 255, "others": 0})]
transforms += [T.RandomCrop(crop_size)]
if backend == "pil":
transforms += [T.PILToTensor()]
if use_v2:
img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
transforms += [
T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
]
else:
# No need to explicitly convert masks as they're magically int64 already
transforms += [T.ToDtype(torch.float, scale=True)]
transforms += [T.Normalize(mean=mean, std=std)]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class SegmentationPresetEval:
def __init__(
self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
):
T, _, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if use_v2:
transforms += [T.Resize(size=(base_size, base_size))]
else:
transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [
T.ToDtype(torch.float, scale=True),
T.Normalize(mean=mean, std=std),
]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
else:
import transforms
return transforms, None, None
class SegmentationPresetTrain:
def __init__(
self,
*,
base_size,
crop_size,
hflip_prob=0.5,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
backend="pil",
use_v2=False,
):
T, datapoints, v2_extras = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]
if hflip_prob > 0:
transforms += [T.RandomHorizontalFlip(hflip_prob)]
if use_v2:
# We need a custom pad transform here, since the padding we want to perform here is fundamentally
# different from the padding in `RandomCrop` if `pad_if_needed=True`.
transforms += [v2_extras.PadIfSmaller(crop_size, fill={datapoints.Mask: 255, "others": 0})]
transforms += [T.RandomCrop(crop_size)]
if backend == "pil":
transforms += [T.PILToTensor()]
if use_v2:
img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
transforms += [
T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
]
else:
# No need to explicitly convert masks as they're magically int64 already
transforms += [T.ConvertImageDtype(torch.float)]
transforms += [T.Normalize(mean=mean, std=std)]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class SegmentationPresetEval:
def __init__(
self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
):
T, _, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if use_v2:
transforms += [T.Resize(size=(base_size, base_size))]
else:
transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
if use_v2:
transforms += [T.ToPureTensor()]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from docarray import Document
from docarray.array.queryset.lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(data: Dict = {}, root_node: Optional[LookupNode] = None):
if isinstance(data, dict):
for key, value in data.items():
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[key])
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet, please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[op])
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}__{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: {data}')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Dict = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: 'Document'):
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: 'Document'):
return self.evaluate(doc)
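# Hedged usage sketch (illustration only): the parser accepts MongoDB-style filters built
# from the operators above. The field names and the `doc` argument are assumptions; any
# docarray Document exposing matching attributes would do.
def _query_parser_demo(doc: 'Document') -> bool:
    conditions = {
        '$and': [
            {'tags__price': {'$lt': 10}},
            {'tags__category': {'$in': ['shoes', 'bags']}},
        ]
    }
    # builds a lookup tree equivalent to: (tags__price < 10) AND (tags__category in [...])
    return QueryParser(conditions).evaluate(doc)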
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ... import Document
from .lookup import Q, LookupNode, LookupLeaf
LOGICAL_OPERATORS = {'$and': 'and', '$or': 'or', '$not': True}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(data: Dict = {}, root_node: Optional[LookupNode] = None):
if isinstance(data, dict):
for key, value in data.items():
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[key])
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet, please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=LOGICAL_OPERATORS[op])
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}__{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: {data}')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Dict = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: 'Document'):
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: 'Document'):
return self.evaluate(doc)
|
import os
from pathlib import Path
from torchaudio.datasets import yesno
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
def get_mock_data(root_dir, labels):
"""
root_dir: path
labels: list of labels
"""
mocked_data = []
base_dir = os.path.join(root_dir, "waves_yesno")
os.makedirs(base_dir, exist_ok=True)
for i, label in enumerate(labels):
filename = f'{"_".join(str(l) for l in label)}.wav'
path = os.path.join(base_dir, filename)
data = get_whitenoise(sample_rate=8000, duration=6, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 8000)
mocked_data.append(normalize_wav(data))
return mocked_data
class TestYesNo(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
labels = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
]
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data = get_mock_data(cls.root_dir, cls.labels)
def _test_yesno(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
expected_label = self.labels[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 8000
assert label == expected_label
n_ite += 1
assert n_ite == len(self.data)
def test_yesno_str(self):
dataset = yesno.YESNO(self.root_dir)
self._test_yesno(dataset)
def test_yesno_path(self):
dataset = yesno.YESNO(Path(self.root_dir))
self._test_yesno(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import yesno
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_data(root_dir, labels):
"""
root_dir: path
labels: list of labels
"""
mocked_data = []
base_dir = os.path.join(root_dir, "waves_yesno")
os.makedirs(base_dir, exist_ok=True)
for i, label in enumerate(labels):
filename = f'{"_".join(str(l) for l in label)}.wav'
path = os.path.join(base_dir, filename)
data = get_whitenoise(sample_rate=8000, duration=6, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 8000)
mocked_data.append(normalize_wav(data))
return mocked_data
class TestYesNo(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
labels = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
]
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data = get_mock_data(cls.root_dir, cls.labels)
def _test_yesno(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
expected_label = self.labels[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 8000
assert label == expected_label
n_ite += 1
assert n_ite == len(self.data)
def test_yesno_str(self):
dataset = yesno.YESNO(self.root_dir)
self._test_yesno(dataset)
def test_yesno_path(self):
dataset = yesno.YESNO(Path(self.root_dir))
self._test_yesno(dataset)
|
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.parametrize('dtype', [torch.int64, torch.float64, torch.int, torch.float])
def test_dtype(dtype):
tensor = torch.tensor([1, 2, 3], dtype=dtype)
assert TorchCompBackend.dtype(tensor) == dtype
def test_device():
tensor = torch.tensor([1, 2, 3])
assert TorchCompBackend.device(tensor) == 'cpu'
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
def test_empty_dtype():
tensor = TorchCompBackend.empty((10, 3), dtype=torch.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == torch.int32
def test_empty_device():
tensor = TorchCompBackend.empty((10, 3), device='meta')
assert tensor.shape == (10, 3)
assert tensor.device == torch.device('meta')
def test_squeeze():
tensor = torch.zeros(size=(1, 1, 3, 1))
squeezed = TorchCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
None,
torch.tensor([0, 2, 4, 6, 8, 10]),
),
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
(0, 10),
torch.tensor([0, 1, 2, 3, 4, 5]),
),
(
torch.tensor([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
torch.tensor([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = TorchCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert torch.allclose(output, result)
|
import pytest
import torch
from docarray.computation.torch_backend import TorchCompBackend
def test_to_device():
t = torch.rand(10, 3)
assert t.device == torch.device('cpu')
t = TorchCompBackend.to_device(t, 'meta')
assert t.device == torch.device('meta')
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((5)), 1),
(torch.zeros((1, 5)), 2),
(torch.zeros((5, 5)), 2),
(torch.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert TorchCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(torch.zeros((10,)), (10,)),
(torch.zeros((5, 5)), (5, 5)),
(torch.zeros(()), ()),
],
)
def test_shape(array, result):
shape = TorchCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
tensor = TorchCompBackend.empty((10, 3))
assert tensor.shape == (10, 3)
def test_empty_dtype():
tensor = TorchCompBackend.empty((10, 3), dtype=torch.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == torch.int32
def test_empty_device():
tensor = TorchCompBackend.empty((10, 3), device='meta')
assert tensor.shape == (10, 3)
assert tensor.device == torch.device('meta')
def test_squeeze():
tensor = torch.zeros(size=(1, 1, 3, 1))
squeezed = TorchCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
None,
torch.tensor([0, 2, 4, 6, 8, 10]),
),
(
torch.tensor([0, 1, 2, 3, 4, 5]),
(0, 10),
(0, 10),
torch.tensor([0, 1, 2, 3, 4, 5]),
),
(
torch.tensor([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
torch.tensor([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = TorchCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert torch.allclose(output, result)
|
from docarray.documents.text import TextDoc
def test_text_document_operators():
doc = TextDoc(text='text', url='url.com')
assert doc == 'text'
assert doc != 'url.com'
doc2 = TextDoc(id=doc.id, text='text', url='url.com')
assert doc == doc2
doc3 = TextDoc(id='other-id', text='text', url='url.com')
assert doc != doc3
assert 't' in doc
assert 'a' not in doc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
text = TextDoc()
assert text is not None
assert text.text is None
|
from docarray.documents.text import Text
def test_text_document_operators():
doc = Text(text='text', url='url.com')
assert doc == 'text'
assert doc != 'url.com'
doc2 = Text(id=doc.id, text='text', url='url.com')
assert doc == doc2
doc3 = Text(id='other-id', text='text', url='url.com')
assert doc != doc3
assert 't' in doc
assert 'a' not in doc
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
text = Text()
assert text is not None
assert text.text is None
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from .generation import Llama
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from .generation import LLaMA
from .model import ModelArgs, Transformer
from .tokenizer import Tokenizer
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
        help='Pad the input image to the minimum size that is divisible '
        'by size_divisor; -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
ori_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != ori_shape:
        print(f'{split_line}\nUsing size divisor, the input shape is padded '
              f'from {ori_shape} to {input_shape}\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
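# Hedged CLI sketch (illustrative; the config path below is an assumption about the
# local checkout layout):
#   python tools/analysis_tools/get_flops.py configs/retinanet/retinanet_r50_fpn_1x_coco.py --shape 1280 800
# This builds the detector from the config and prints the estimated FLOPs and
# parameter count for the padded input shape.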
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
        help='Pad the input image to the minimum size that is divisible '
        'by size_divisor; -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
orig_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != orig_shape:
        print(f'{split_line}\nUsing size divisor, the input shape is padded '
              f'from {orig_shape} to {input_shape}\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm']),
dict(
type='CityScapesMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
seg_prefix=data_root + '/gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance')
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = [
dict(
type='CocoMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
metric=['bbox', 'segm']),
dict(
type='CityScapesMetric',
ann_file=data_root +
'annotations/instancesonly_filtered_gtFine_val.json',
seg_prefix=data_root + '/gtFine/val',
outfile_prefix='./work_dirs/cityscapes_metric/instance')
]
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/instancesonly_filtered_gtFine_test.json',
# data_prefix=dict(img='leftImg8bit/test/'),
# test_mode=True,
# filter_cfg=dict(filter_empty_gt=True, min_size=32),
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CityScapesMetric',
# format_only=True,
# outfile_prefix='./work_dirs/cityscapes_metric/test')
|
import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import ops
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
class ConvertersTest(testing.TestCase):
def setUp(self):
xyxy_box = np.array(
[[[10, 20, 110, 120], [20, 30, 120, 130]]], dtype="float32"
)
yxyx_box = np.array(
[[[20, 10, 120, 110], [30, 20, 130, 120]]], dtype="float32"
)
rel_xyxy_box = np.array(
[[[0.01, 0.02, 0.11, 0.12], [0.02, 0.03, 0.12, 0.13]]],
dtype="float32",
)
rel_yxyx_box = np.array(
[[[0.02, 0.01, 0.12, 0.11], [0.03, 0.02, 0.13, 0.12]]],
dtype="float32",
)
center_xywh_box = np.array(
[[[60, 70, 100, 100], [70, 80, 100, 100]]], dtype="float32"
)
center_yxhw_box = np.array(
[[[70, 60, 100, 100], [80, 70, 100, 100]]], dtype="float32"
)
xywh_box = np.array(
[[[10, 20, 100, 100], [20, 30, 100, 100]]], dtype="float32"
)
rel_xywh_box = np.array(
[[[0.01, 0.02, 0.1, 0.1], [0.02, 0.03, 0.1, 0.1]]], dtype="float32"
)
self.images = np.ones([2, 1000, 1000, 3])
self.height = 1000
self.width = 1000
self.boxes = {
"xyxy": xyxy_box,
"center_xywh": center_xywh_box,
"rel_xywh": rel_xywh_box,
"xywh": xywh_box,
"rel_xyxy": rel_xyxy_box,
"yxyx": yxyx_box,
"rel_yxyx": rel_yxyx_box,
"center_yxhw": center_yxhw_box,
}
@parameterized.named_parameters(
*[
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(
[
"xyxy",
"yxyx",
"xywh",
"rel_xyxy",
"rel_yxyx",
"center_xywh",
"center_yxhw",
],
2,
)
]
+ [("xyxy_xyxy", "xyxy", "xyxy")]
)
def test_convert_all_formats(self, source, target):
source_box = self.boxes[source]
target_box = self.boxes[target]
self.assertAllClose(
convert_format(
source_box,
source=source,
target=target,
height=self.height,
width=self.width,
),
target_box,
)
def test_convert_format_invalid_source(self):
boxes = self.boxes["xywh"]
with self.assertRaises(ValueError):
convert_format(boxes, source="invalid", target="xywh")
def test_convert_format_invalid_target(self):
boxes = self.boxes["xyxy"]
with self.assertRaises(ValueError):
convert_format(boxes, source="xyxy", target="invalid")
def test_convert_format_missing_dimensions(self):
boxes = self.boxes["xyxy"]
with self.assertRaisesRegex(
ValueError, r"must receive `height` and `width`"
):
convert_format(boxes, source="xyxy", target="rel_xyxy")
def test_clip_to_image_size(self):
boxes = {
"boxes": np.array([[0.0, 0.0, 1.5, 1.6], [0.5, 0.4, 0.7, 0.8]]),
"labels": np.array([0, 1]),
}
expected_clipped = {
"boxes": np.array([[0.0, 0.0, 1.0, 1.0], [0.5, 0.4, 0.7, 0.8]]),
"labels": np.array([0, 1]),
}
clipped_boxes = clip_to_image_size(
boxes, bounding_box_format="rel_xyxy"
)
self.assertAllEqual(clipped_boxes, expected_clipped)
def test_affine_identity(self):
# Test identity transform (no change)
batch_size = self.boxes["xyxy"].shape[0]
transformed_boxes = affine_transform(
boxes=self.boxes["xyxy"],
angle=np.zeros([batch_size]),
translate_x=np.zeros([batch_size]),
translate_y=np.zeros([batch_size]),
scale=np.ones([batch_size]),
shear_x=np.zeros([batch_size]),
shear_y=np.zeros([batch_size]),
height=self.height,
width=self.width,
)
transformed_boxes = ops.convert_to_numpy(transformed_boxes)
self.assertAllClose(self.boxes["xyxy"], transformed_boxes)
|
import itertools
import numpy as np
from absl.testing import parameterized
from keras.src import testing
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
affine_transform,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
clip_to_image_size,
)
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.converters import ( # noqa: E501
convert_format,
)
class ConvertersTest(testing.TestCase):
def setUp(self):
xyxy_box = np.array(
[[[10, 20, 110, 120], [20, 30, 120, 130]]], dtype="float32"
)
yxyx_box = np.array(
[[[20, 10, 120, 110], [30, 20, 130, 120]]], dtype="float32"
)
rel_xyxy_box = np.array(
[[[0.01, 0.02, 0.11, 0.12], [0.02, 0.03, 0.12, 0.13]]],
dtype="float32",
)
rel_yxyx_box = np.array(
[[[0.02, 0.01, 0.12, 0.11], [0.03, 0.02, 0.13, 0.12]]],
dtype="float32",
)
center_xywh_box = np.array(
[[[60, 70, 100, 100], [70, 80, 100, 100]]], dtype="float32"
)
center_yxhw_box = np.array(
[[[70, 60, 100, 100], [80, 70, 100, 100]]], dtype="float32"
)
xywh_box = np.array(
[[[10, 20, 100, 100], [20, 30, 100, 100]]], dtype="float32"
)
rel_xywh_box = np.array(
[[[0.01, 0.02, 0.1, 0.1], [0.02, 0.03, 0.1, 0.1]]], dtype="float32"
)
self.images = np.ones([2, 1000, 1000, 3])
self.height = 1000
self.width = 1000
self.boxes = {
"xyxy": xyxy_box,
"center_xywh": center_xywh_box,
"rel_xywh": rel_xywh_box,
"xywh": xywh_box,
"rel_xyxy": rel_xyxy_box,
"yxyx": yxyx_box,
"rel_yxyx": rel_yxyx_box,
"center_yxhw": center_yxhw_box,
}
@parameterized.named_parameters(
*[
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(
[
"xyxy",
"yxyx",
"xywh",
"rel_xyxy",
"rel_yxyx",
"center_xywh",
"center_yxhw",
],
2,
)
]
+ [("xyxy_xyxy", "xyxy", "xyxy")]
)
def test_convert_all_formats(self, source, target):
source_box = self.boxes[source]
target_box = self.boxes[target]
self.assertAllClose(
convert_format(
source_box,
source=source,
target=target,
height=self.height,
width=self.width,
),
target_box,
)
def test_convert_format_invalid_source(self):
boxes = self.boxes["xywh"]
with self.assertRaises(ValueError):
convert_format(boxes, source="invalid", target="xywh")
def test_convert_format_invalid_target(self):
boxes = self.boxes["xyxy"]
with self.assertRaises(ValueError):
convert_format(boxes, source="xyxy", target="invalid")
def test_convert_format_missing_dimensions(self):
boxes = self.boxes["xyxy"]
with self.assertRaisesRegex(
ValueError, r"must receive `height` and `width`"
):
convert_format(boxes, source="xyxy", target="rel_xyxy")
def test_clip_to_image_size(self):
boxes = {
"boxes": np.array([[0.0, 0.0, 1.5, 1.6], [0.5, 0.4, 0.7, 0.8]]),
"labels": np.array([0, 1]),
}
expected_clipped = {
"boxes": np.array([[0.0, 0.0, 1.0, 1.0], [0.5, 0.4, 0.7, 0.8]]),
"labels": np.array([0, 1]),
}
clipped_boxes = clip_to_image_size(
boxes, bounding_box_format="rel_xyxy"
)
self.assertAllEqual(clipped_boxes, expected_clipped)
def test_affine_identity(self):
# Test identity transform (no change)
batch_size = self.boxes["xyxy"].shape[0]
transformed_boxes = affine_transform(
boxes=self.boxes["xyxy"],
angle=np.zeros([batch_size]),
translate_x=np.zeros([batch_size]),
translate_y=np.zeros([batch_size]),
scale=np.ones([batch_size]),
shear_x=np.zeros([batch_size]),
shear_y=np.zeros([batch_size]),
height=self.height,
width=self.width,
)
self.assertAllClose(self.boxes["xyxy"], transformed_boxes)
|
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from box_sdk_gen import (
BoxClient,
)
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_ai_response_from_box_files,
add_extra_header_to_box_client,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
class BoxAIPromptToolSpec(BaseToolSpec):
"""
Generates AI prompts based on a Box file.
Args:
box_client (BoxClient): A BoxClient instance for interacting with Box API.
Attributes:
spec_functions (list): A list of supported functions.
_box_client (BoxClient): An instance of BoxClient for interacting with Box API.
Methods:
ai_prompt(file_id, ai_prompt): Generates an AI prompt based on a Box file.
Args:
file_id (str): The ID of the Box file.
ai_prompt (str): The base AI prompt to use.
Returns:
Document: A Document object containing the generated AI prompt.
"""
spec_functions = ["ai_prompt"]
_box_client: BoxClient
def __init__(self, box_client: BoxClient) -> None:
"""
Initializes the BoxAIPromptToolSpec with a BoxClient instance.
Args:
box_client (BoxClient): The BoxClient instance to use for interacting with the Box API.
"""
self._box_client = add_extra_header_to_box_client(box_client)
def ai_prompt(
self,
file_id: str,
ai_prompt: str,
) -> Document:
"""
Generates an AI prompt based on a Box file.
Retrieves the specified Box file, constructs an AI prompt using the provided base prompt,
and returns a Document object containing the generated prompt and file metadata.
Args:
file_id (str): The ID of the Box file to process.
ai_prompt (str): The base AI prompt to use as a template.
Returns:
Document: A Document object containing the generated AI prompt and file metadata.
"""
# Connect to Box
box_check_connection(self._box_client)
# get box files information
box_file = get_box_files_details(
box_client=self._box_client, file_ids=[file_id]
)[0]
box_file = get_ai_response_from_box_files(
box_client=self._box_client,
box_files=[box_file],
ai_prompt=ai_prompt,
)[0]
doc = box_file_to_llama_document(box_file)
doc.text = box_file.ai_response if box_file.ai_response else ""
doc.metadata["ai_prompt"] = box_file.ai_prompt
doc.metadata["ai_response"] = (
box_file.ai_response if box_file.ai_response else ""
)
return doc
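# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Construction below assumes a developer-token auth flow from box_sdk_gen; the token
# and file id are placeholders.
if __name__ == "__main__":
    from box_sdk_gen import BoxDeveloperTokenAuth
    client = BoxClient(auth=BoxDeveloperTokenAuth(token="YOUR_DEV_TOKEN"))
    tool_spec = BoxAIPromptToolSpec(box_client=client)
    answer = tool_spec.ai_prompt(file_id="1234567890", ai_prompt="Summarize this file")
    print(answer.text)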
|
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from box_sdk_gen import (
BoxClient,
)
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_ai_response_from_box_files,
add_extra_header_to_box_client,
)
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
class BoxAIPromptToolSpec(BaseToolSpec):
"""
Generates AI prompts based on a Box file.
Args:
box_client (BoxClient): A BoxClient instance for interacting with Box API.
Attributes:
spec_functions (list): A list of supported functions.
_box_client (BoxClient): An instance of BoxClient for interacting with Box API.
Methods:
ai_prompt(file_id, ai_prompt): Generates an AI prompt based on a Box file.
Args:
file_id (str): The ID of the Box file.
ai_prompt (str): The base AI prompt to use.
Returns:
Document: A Document object containing the generated AI prompt.
"""
spec_functions = ["ai_prompt"]
_box_client: BoxClient
def __init__(self, box_client: BoxClient) -> None:
"""
Initializes the BoxAIPromptToolSpec with a BoxClient instance.
Args:
box_client (BoxClient): The BoxClient instance to use for interacting with the Box API.
"""
self._box_client = add_extra_header_to_box_client(box_client)
def ai_prompt(
self,
file_id: str,
ai_prompt: str,
) -> Document:
"""
Generates an AI prompt based on a Box file.
Retrieves the specified Box file, constructs an AI prompt using the provided base prompt,
and returns a Document object containing the generated prompt and file metadata.
Args:
file_id (str): The ID of the Box file to process.
ai_prompt (str): The base AI prompt to use as a template.
Returns:
Document: A Document object containing the generated AI prompt and file metadata.
"""
# Connect to Box
box_check_connection(self._box_client)
# get box files information
box_file = get_box_files_details(
box_client=self._box_client, file_ids=[file_id]
)[0]
box_file = get_ai_response_from_box_files(
box_client=self._box_client,
box_files=[box_file],
ai_prompt=ai_prompt,
)[0]
doc = box_file_to_llama_document(box_file)
doc.text = box_file.ai_response if box_file.ai_response else ""
doc.metadata["ai_prompt"] = box_file.ai_prompt
doc.metadata["ai_response"] = (
box_file.ai_response if box_file.ai_response else ""
)
return doc
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .convfc_bbox_head import ConvFCBBoxHead
@MODELS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
    This inherits ``ConvFCBBoxHead`` with a modified forward() function, allowing us
    to get the intermediate shared feature.
"""
def _forward_shared(self, x):
"""Forward function for shared part."""
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
return x
def _forward_cls_reg(self, x):
"""Forward function for classification and regression parts."""
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
def forward(self, x, return_shared_feat=False):
"""Forward function.
Args:
x (Tensor): input features
return_shared_feat (bool): If True, return cls-reg-shared feature.
Return:
            out (tuple[Tensor]): contains ``cls_score`` and ``bbox_pred``,
if ``return_shared_feat`` is True, append ``x_shared`` to the
returned tuple.
"""
x_shared = self._forward_shared(x)
out = self._forward_cls_reg(x_shared)
if return_shared_feat:
out += (x_shared, )
return out
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from .convfc_bbox_head import ConvFCBBoxHead
@HEADS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
    This inherits ``ConvFCBBoxHead`` with a modified forward() function, allowing us
    to get the intermediate shared feature.
"""
def _forward_shared(self, x):
"""Forward function for shared part."""
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
return x
def _forward_cls_reg(self, x):
"""Forward function for classification and regression parts."""
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
def forward(self, x, return_shared_feat=False):
"""Forward function.
Args:
x (Tensor): input features
return_shared_feat (bool): If True, return cls-reg-shared feature.
Return:
            out (tuple[Tensor]): contains ``cls_score`` and ``bbox_pred``,
if ``return_shared_feat`` is True, append ``x_shared`` to the
returned tuple.
"""
x_shared = self._forward_shared(x)
out = self._forward_cls_reg(x_shared)
if return_shared_feat:
out += (x_shared, )
return out
|
import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.storage.base.helper import Offset2ID
@pytest.fixture(scope='function')
def docs():
d1 = Document(embedding=np.array([10, 0]))
d2 = Document(embedding=np.array([0, 10]))
d3 = Document(embedding=np.array([-10, -10]))
yield d1, d2, d3
@pytest.mark.parametrize(
'storage,config',
[
('weaviate', {'n_dim': 2, 'list_like': False}),
('annlite', {'n_dim': 2, 'list_like': False}),
('qdrant', {'n_dim': 2, 'list_like': False}),
('elasticsearch', {'n_dim': 2, 'list_like': False}),
('redis', {'n_dim': 2, 'list_like': False}),
],
)
def test_disable_offset2id(docs, storage, config, start_storage):
if config:
da = DocumentArray(storage=storage, config=config)
else:
da = DocumentArray(storage=storage)
assert len(da) == 0
da.extend(docs)
assert len(da) == 3
with pytest.raises(ValueError):
da[0]
def test_offset2ids(docs, start_storage):
list_like = False
ids = [str(i) for i in range(3)]
offset2id = Offset2ID(ids, list_like)
len_before = len(offset2id)
offset2id.append("4")
len_after = len(offset2id)
assert len_before == len_after
len_before = len(offset2id)
offset2id.extend("4")
len_after = len(offset2id)
assert len_before == len_after
len_before = len(offset2id)
offset2id.delete_by_ids("3")
len_after = len(offset2id)
assert len_before == len_after
with pytest.raises(ValueError):
offset2id.index(2)
with pytest.raises(ValueError):
offset2id.get_id("2")
|
import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.storage.base.helper import Offset2ID
@pytest.fixture(scope='function')
def docs():
d1 = Document(embedding=np.array([10, 0]))
d2 = Document(embedding=np.array([0, 10]))
d3 = Document(embedding=np.array([-10, -10]))
yield d1, d2, d3
@pytest.mark.parametrize(
'storage,config',
[
('weaviate', {'n_dim': 2, 'list_like': False}),
('annlite', {'n_dim': 2, 'list_like': False}),
('qdrant', {'n_dim': 2, 'list_like': False}),
('elasticsearch', {'n_dim': 2, 'list_like': False}),
('redis', {'n_dim': 2, 'list_like': False}),
],
)
def test_disable_offset2id(docs, storage, config, start_storage):
if config:
da = DocumentArray(docs, storage=storage, config=config)
else:
da = DocumentArray(docs, storage=storage)
with pytest.raises(ValueError):
da[0]
def test_offset2ids(docs, start_storage):
list_like = False
ids = [str(i) for i in range(3)]
offset2id = Offset2ID(ids, list_like)
len_before = len(offset2id)
offset2id.append("4")
len_after = len(offset2id)
assert len_before == len_after
len_before = len(offset2id)
offset2id.extend("4")
len_after = len(offset2id)
assert len_before == len_after
len_before = len(offset2id)
offset2id.delete_by_ids("3")
len_after = len(offset2id)
assert len_before == len_after
with pytest.raises(ValueError):
offset2id.index(2)
with pytest.raises(ValueError):
offset2id.get_id("2")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseAnglELoss(model), corpus_regularizer_weight=5e-5, use_corpus_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAnglELoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseAnglELoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAnglELoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.human import (
AsyncHumanApprovalCallbackHandler,
HumanApprovalCallbackHandler,
HumanRejectedException,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"HumanRejectedException": "langchain_community.callbacks.human",
"HumanApprovalCallbackHandler": "langchain_community.callbacks.human",
"AsyncHumanApprovalCallbackHandler": "langchain_community.callbacks.human",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AsyncHumanApprovalCallbackHandler",
"HumanApprovalCallbackHandler",
"HumanRejectedException",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.human import (
AsyncHumanApprovalCallbackHandler,
HumanApprovalCallbackHandler,
HumanRejectedException,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"HumanRejectedException": "langchain_community.callbacks.human",
"HumanApprovalCallbackHandler": "langchain_community.callbacks.human",
"AsyncHumanApprovalCallbackHandler": "langchain_community.callbacks.human",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"HumanRejectedException",
"HumanApprovalCallbackHandler",
"AsyncHumanApprovalCallbackHandler",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval: int = 50) -> None:
self.interval = interval
def after_train_iter(self,
runner: Runner,
batch_idx: int,
data_batch: Optional[dict] = None,
outputs: Optional[dict] = None) -> None:
"""Regularly check whether the loss is valid every n iterations.
Args:
runner (:obj:`Runner`): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (dict, Optional): Data from dataloader.
Defaults to None.
outputs (dict, Optional): Outputs from model. Defaults to None.
"""
if self.every_n_train_iters(runner, self.interval):
assert torch.isfinite(outputs['loss']), \
                runner.logger.info('loss becomes infinite or NaN!')
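# Hedged config sketch (illustrative): in an MMEngine-style config this hook is
# typically enabled via `custom_hooks`, e.g.
#   custom_hooks = [dict(type='CheckInvalidLossHook', interval=50)]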
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import Hook
from mmengine.runner import Runner
from mmdet.registry import HOOKS
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
"""Check invalid loss hook.
This hook will regularly check whether the loss is valid
during training.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval: int = 50) -> None:
self.interval = interval
def after_train_iter(
self,
runner: Runner,
batch_idx: int,
data_batch: Optional[Sequence[dict]] = None,
outputs: Optional[Union[Sequence[BaseDataElement],
dict]] = None) -> None:
"""Regularly check whether the loss is valid every n iterations.
Args:
runner (:obj:`Runner`): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], Optional): Data from dataloader.
Defaults to None.
outputs (Union[Sequence[:obj:`BaseDataElement`], dict], Optional):
Outputs from model. Defaults to None.
"""
if self.every_n_train_iters(runner, self.interval):
assert torch.isfinite(outputs['loss']), \
                runner.logger.info('loss becomes infinite or NaN!')
|
def __getattr__(name: str):
import warnings
warnings.warn(
"Torchaudio's I/O functions now support per-call backend dispatch. "
"Importing backend implementation directly is no longer guaranteed to work. "
"Please use `backend` keyword with load/save/info function, instead of "
"calling the underlying implementation directly.",
stacklevel=2,
)
from . import _no_backend
return getattr(_no_backend, name)
|
def __getattr__(name: str):
import warnings
warnings.warn(
"Torchaudio's I/O functions now support par-call bakcend dispatch. "
"Importing backend implementation directly is no longer guaranteed to work. "
"Please use `backend` keyword with load/save/info function, instead of "
"calling the udnerlying implementation directly.",
stacklevel=2,
)
from . import _no_backend
return getattr(_no_backend, name)
|
import os
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, MetadataMode
from mixedbread_ai.core import RequestOptions
from mixedbread_ai.client import MixedbreadAI
import httpx
dispatcher = get_dispatcher(__name__)
class MixedbreadAIRerank(BaseNodePostprocessor):
"""
Class for reranking nodes using the mixedbread ai reranking API with models such as 'mixedbread-ai/mxbai-rerank-large-v1'.
Args:
top_n (int): Top N nodes to return. Defaults to 10.
model (str): mixedbread ai model name. Defaults to "mixedbread-ai/mxbai-rerank-large-v1".
api_key (Optional[str]): mixedbread ai API key. Defaults to None.
max_retries (Optional[int]): Maximum number of retries for API calls. Defaults to None.
timeout (Optional[float]): Timeout for API calls.
httpx_client (Optional[httpx.Client]): Custom HTTPX client for synchronous requests.
httpx_async_client (Optional[httpx.AsyncClient]): Custom HTTPX client for asynchronous requests.
"""
model: str = Field(
default="mixedbread-ai/mxbai-rerank-large-v1",
description="mixedbread ai model name.",
min_length=1,
)
top_n: int = Field(default=10, description="Top N nodes to return.", gt=0)
_client: Any = PrivateAttr()
_async_client: Any = PrivateAttr()
_request_options: Optional[RequestOptions] = PrivateAttr()
def __init__(
self,
top_n: int = 10,
model: str = "mixedbread-ai/mxbai-rerank-large-v1",
api_key: Optional[str] = None,
max_retries: Optional[int] = None,
timeout: Optional[float] = None,
httpx_client: Optional[httpx.Client] = None,
httpx_async_client: Optional[httpx.AsyncClient] = None,
):
super().__init__(top_n=top_n, model=model)
try:
api_key = api_key or os.environ["MXBAI_API_KEY"]
except KeyError:
raise ValueError(
"Must pass in mixedbread ai API key or "
"specify via MXBAI_API_KEY environment variable"
)
self._client = MixedbreadAI(
api_key=api_key, timeout=timeout, httpx_client=httpx_client
)
self._async_client = MixedbreadAI(
api_key=api_key, timeout=timeout, httpx_client=httpx_async_client
)
self._request_options = (
RequestOptions(max_retries=max_retries) if max_retries is not None else None
)
@classmethod
def class_name(cls) -> str:
return "MixedbreadAIRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""
Postprocess nodes by reranking them using the mixedbread ai reranking API.
Args:
nodes (List[NodeWithScore]): List of nodes to rerank.
query_bundle (Optional[QueryBundle]): Query bundle containing the query string.
Returns:
List[NodeWithScore]: Reranked list of nodes.
"""
dispatcher.event(
ReRankStartEvent(
query=query_bundle, nodes=nodes, top_n=self.top_n, model_name=self.model
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
results = self._client.reranking(
model=self.model,
query=query_bundle.query_str,
input=texts,
top_k=self.top_n,
return_input=False,
request_options=self._request_options,
)
new_nodes = []
for result in results.data:
new_node_with_score = NodeWithScore(
node=nodes[result.index].node, score=result.score
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
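# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Node construction below uses llama_index core primitives; the API key and query
# string are placeholders.
if __name__ == "__main__":
    from llama_index.core.schema import TextNode
    reranker = MixedbreadAIRerank(api_key="YOUR_MXBAI_API_KEY", top_n=2)
    nodes = [NodeWithScore(node=TextNode(text=t), score=1.0) for t in ("alpha", "beta", "gamma")]
    reranked = reranker.postprocess_nodes(nodes, query_bundle=QueryBundle(query_str="which text is first?"))
    print([n.node.get_content() for n in reranked])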
|
import os
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks import CBEventType, EventPayload
from llama_index.core.instrumentation import get_dispatcher
from llama_index.core.instrumentation.events.rerank import (
ReRankEndEvent,
ReRankStartEvent,
)
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import NodeWithScore, QueryBundle, MetadataMode
from mixedbread_ai.core import RequestOptions
from mixedbread_ai.client import MixedbreadAI
import httpx
dispatcher = get_dispatcher(__name__)
class MixedbreadAIRerank(BaseNodePostprocessor):
"""
Class for reranking nodes using the mixedbread ai reranking API with models such as 'mixedbread-ai/mxbai-rerank-large-v1'.
Args:
top_n (int): Top N nodes to return. Defaults to 10.
model (str): mixedbread ai model name. Defaults to "mixedbread-ai/mxbai-rerank-large-v1".
api_key (Optional[str]): mixedbread ai API key. Defaults to None.
max_retries (Optional[int]): Maximum number of retries for API calls. Defaults to None.
timeout (Optional[float]): Timeout for API calls.
httpx_client (Optional[httpx.Client]): Custom HTTPX client for synchronous requests.
httpx_async_client (Optional[httpx.AsyncClient]): Custom HTTPX client for asynchronous requests.
"""
model: str = Field(
default="mixedbread-ai/mxbai-rerank-large-v1",
description="mixedbread ai model name.",
min_length=1,
)
top_n: int = Field(default=10, description="Top N nodes to return.", gt=0)
_client: Any = PrivateAttr()
_async_client: Any = PrivateAttr()
_request_options: Optional[RequestOptions] = PrivateAttr()
def __init__(
self,
top_n: int = 10,
model: str = "mixedbread-ai/mxbai-rerank-large-v1",
api_key: Optional[str] = None,
max_retries: Optional[int] = None,
timeout: Optional[float] = None,
httpx_client: Optional[httpx.Client] = None,
httpx_async_client: Optional[httpx.AsyncClient] = None,
):
super().__init__(top_n=top_n, model=model)
try:
api_key = api_key or os.environ["MXBAI_API_KEY"]
except KeyError:
raise ValueError(
"Must pass in mixedbread ai API key or "
"specify via MXBAI_API_KEY environment variable"
)
self._client = MixedbreadAI(
api_key=api_key, timeout=timeout, httpx_client=httpx_client
)
self._async_client = MixedbreadAI(
api_key=api_key, timeout=timeout, httpx_client=httpx_async_client
)
self._request_options = (
RequestOptions(max_retries=max_retries) if max_retries is not None else None
)
@classmethod
def class_name(cls) -> str:
return "MixedbreadAIRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""
Postprocess nodes by reranking them using the mixedbread ai reranking API.
Args:
nodes (List[NodeWithScore]): List of nodes to rerank.
query_bundle (Optional[QueryBundle]): Query bundle containing the query string.
Returns:
List[NodeWithScore]: Reranked list of nodes.
"""
dispatcher.event(
ReRankStartEvent(
query=query_bundle, nodes=nodes, top_n=self.top_n, model_name=self.model
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
results = self._client.reranking(
model=self.model,
query=query_bundle.query_str,
input=texts,
top_k=self.top_n,
return_input=False,
request_options=self._request_options,
)
new_nodes = []
for result in results.data:
new_node_with_score = NodeWithScore(
node=nodes[result.index].node, score=result.score
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
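# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how this postprocessor could be invoked directly, assuming
# the MXBAI_API_KEY environment variable is set and network access is available.
# The sample texts and the query string are illustrative placeholders.
if __name__ == "__main__":
    from llama_index.core.schema import TextNode

    reranker = MixedbreadAIRerank(top_n=2)
    candidates = [
        NodeWithScore(node=TextNode(text="Paris is the capital of France."), score=0.1),
        NodeWithScore(node=TextNode(text="Berlin is the capital of Germany."), score=0.2),
        NodeWithScore(node=TextNode(text="The Eiffel Tower is in Paris."), score=0.3),
    ]
    reranked = reranker.postprocess_nodes(
        candidates, query_bundle=QueryBundle(query_str="What is the capital of France?")
    )
    for node_with_score in reranked:
        print(node_with_score.score, node_with_score.node.get_content())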
|
import wave
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class AudioDataMixin:
"""Provide helper functions for :class:`Document` to support audio data."""
def save_audio_tensor_to_file(
self: 'T',
file: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> 'T':
"""Save :attr:`.tensor` into an wav file. Mono/stereo is preserved.
:param file: if file is a string, open the file by that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
:return: Document itself after processed
"""
# Convert to (little-endian) 16 bit integers.
max_int16 = 2**15
tensor = (self.tensor * max_int16).astype('<h')
n_channels = 2 if self.tensor.ndim > 1 else 1
with wave.open(file, 'w') as f:
            # Number of channels (1 = mono, 2 = stereo).
            f.setnchannels(n_channels)
            # Sample width in bytes (2 = 16-bit samples).
f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(tensor.tobytes())
return self
def load_uri_to_audio_tensor(self: 'T') -> 'T':
"""Convert an audio :attr:`.uri` into :attr:`.tensor` inplace
:return: Document itself after processed
"""
with wave.open(
self.uri
) as ifile: #: note wave is Python built-in module https://docs.python.org/3/library/wave.html
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(np.float32)
# Normalise float32 array so that values are between -1.0 and +1.0
max_int16 = 2**15
audio_normalised = audio_as_np_float32 / max_int16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty(
(int(len(audio_normalised) / channels), channels)
)
audio_stereo[:, 0] = audio_normalised[
range(0, len(audio_normalised), 2)
]
audio_stereo[:, 1] = audio_normalised[
range(1, len(audio_normalised), 2)
]
self.tensor = audio_stereo
else:
self.tensor = audio_normalised
return self
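# --- Hedged usage sketch (not part of the original module) ---
# Round trip through the two helpers above, assuming this mixin is part of
# docarray's `Document` class: synthesize one second of a 440 Hz sine wave,
# save it as a wav file, then reload and re-normalise it from the uri.
# The file name 'tone.wav' is an illustrative placeholder.
if __name__ == '__main__':
    from docarray import Document

    t = np.linspace(0, 1, 44100, endpoint=False)
    doc = Document(tensor=0.5 * np.sin(2 * np.pi * 440 * t))
    doc.save_audio_tensor_to_file('tone.wav', sample_rate=44100)
    doc.uri = 'tone.wav'
    doc.load_uri_to_audio_tensor()
    print(doc.tensor.shape, doc.tensor.min(), doc.tensor.max())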
|
import wave
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class AudioDataMixin:
"""Provide helper functions for :class:`Document` to support audio data."""
def save_audio_tensor_to_file(
self: 'T',
file: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> 'T':
"""Save :attr:`.tensor` into an wav file. Mono/stereo is preserved.
:param file: if file is a string, open the file by that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
:return: Document itself after processed
"""
# Convert to (little-endian) 16 bit integers.
max_int16 = 2**15
tensor = (self.tensor * max_int16).astype('<h')
n_channels = 2 if self.tensor.ndim > 1 else 1
with wave.open(file, 'w') as f:
            # Number of channels (1 = mono, 2 = stereo).
            f.setnchannels(n_channels)
            # Sample width in bytes (2 = 16-bit samples).
f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(tensor.tobytes())
return self
def load_uri_to_audio_tensor(self: 'T') -> 'T':
"""Convert an audio :attr:`.uri` into :attr:`.tensor` inplace
:return: Document itself after processed
"""
with wave.open(
self.uri
) as ifile: #: note wave is Python built-in module https://docs.python.org/3/library/wave.html
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(np.float32)
# Normalise float32 array so that values are between -1.0 and +1.0
max_int16 = 2**15
audio_normalised = audio_as_np_float32 / max_int16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty(
(int(len(audio_normalised) / channels), channels)
)
audio_stereo[:, 0] = audio_normalised[
range(0, len(audio_normalised), 2)
]
audio_stereo[:, 1] = audio_normalised[
range(1, len(audio_normalised), 2)
]
self.tensor = audio_stereo
else:
self.tensor = audio_normalised
return self
|
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: Tuple[str, Any]) -> str:
path = Path(data[0])
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
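# --- Hedged usage sketch (kept as a comment: this module relies on relative
# imports and the prototype datasets API is experimental, so the exact entry
# point may differ between torchvision versions) ---
# from torchvision.prototype import datasets
#
# dataset = datasets.load("food101", split="train")
# sample = next(iter(dataset))
# print(sample["label"], sample["path"])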
|
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: Tuple[str, Any]) -> str:
path = Path(data[0])
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
doc_vec = DocVec[MyDoc](docs)
storage = doc_vec._storage
result = str(doc_vec[0])
assert 'MyDoc' in result
assert 'id' in result
assert 'tensor' in result
assert 'name' in result
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
import numpy as np
from docarray import BaseDoc
from docarray.array import DocVec
from docarray.array.doc_vec.column_storage import ColumnStorageView
from docarray.typing import AnyTensor
def test_document_view():
class MyDoc(BaseDoc):
tensor: AnyTensor
name: str
docs = [MyDoc(tensor=np.zeros((10, 10)), name='hello', id=i) for i in range(4)]
storage = DocVec[MyDoc](docs)._storage
doc = MyDoc.from_view(ColumnStorageView(0, storage))
assert doc.is_view()
assert doc.id == '0'
assert (doc.tensor == np.zeros(10)).all()
assert doc.name == 'hello'
storage.columns['id'][0] = '12345'
storage.columns['tensor'][0] = np.ones(10)
storage.columns['name'][0] = 'byebye'
assert doc.id == '12345'
assert (doc.tensor == np.ones(10)).all()
assert doc.name == 'byebye'
|
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **need** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils import ExceptionWrapper
from . import MP_STATUS_CHECK_INTERVAL
def _pin_memory_loop(in_queue, out_queue, device_id, done_event, device):
# This setting is thread local, and prevents the copy in pin_memory from
# consuming all CPU cores.
torch.set_num_threads(1)
torch.multiprocessing._set_thread_name("pt_data_pin")
if device == "cuda":
torch.cuda.set_device(device_id)
elif device == "xpu":
torch.xpu.set_device(device_id) # type: ignore[attr-defined]
elif device == torch._C._get_privateuse1_backend_name():
custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
custom_device_mod.set_device(device_id)
elif device is None:
torch.accelerator.set_device_index(device_id)
def do_one_step():
try:
r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
return
idx, data = r
if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
try:
data = pin_memory(data, device)
except Exception:
data = ExceptionWrapper(
where=f"in pin memory thread for device {device_id}"
)
r = (idx, data)
while not done_event.is_set():
try:
out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL)
break
except queue.Full:
continue
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
while not done_event.is_set():
# Make sure that we don't preserve any object from one iteration
# to the next
do_one_step()
def pin_memory(data, device=None):
if isinstance(data, torch.Tensor):
return data.pin_memory(device)
elif isinstance(data, (str, bytes)):
return data
elif isinstance(data, collections.abc.Mapping):
try:
if isinstance(data, collections.abc.MutableMapping):
                # The mapping type may have extra properties, so we can't just
                # use `type(data)(...)` to create the new mapping.
                # Create a clone and update it if the mapping type is mutable.
clone = copy.copy(data)
clone.update(
{k: pin_memory(sample, device) for k, sample in data.items()}
)
return clone
else:
return type(data)({k: pin_memory(sample, device) for k, sample in data.items()}) # type: ignore[call-arg]
except TypeError:
# The mapping type may not support `copy()` / `update(mapping)`
# or `__init__(iterable)`.
return {k: pin_memory(sample, device) for k, sample in data.items()}
elif isinstance(data, tuple) and hasattr(data, "_fields"): # namedtuple
return type(data)(*(pin_memory(sample, device) for sample in data))
elif isinstance(data, tuple):
return [
pin_memory(sample, device) for sample in data
] # Backwards compatibility.
elif isinstance(data, collections.abc.Sequence):
try:
if isinstance(data, collections.abc.MutableSequence):
# The sequence type may have extra properties, so we can't just
# use `type(data)(...)` to create the new sequence.
# Create a clone and update it if the sequence type is mutable.
clone = copy.copy(data) # type: ignore[arg-type]
for i, item in enumerate(data):
clone[i] = pin_memory(item, device)
return clone
return type(data)([pin_memory(sample, device) for sample in data]) # type: ignore[call-arg]
except TypeError:
# The sequence type may not support `copy()` / `__setitem__(index, item)`
# or `__init__(iterable)` (e.g., `range`).
return [pin_memory(sample, device) for sample in data]
elif hasattr(data, "pin_memory"):
return data.pin_memory()
else:
return data
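# --- Hedged usage sketch (kept as a comment: this module uses a relative import
# and is not meant to be executed directly). pin_memory() walks arbitrarily
# nested containers and pins every tensor it finds; pinning requires a
# CUDA-capable PyTorch build.
#
# batch = {
#     "image": torch.randn(2, 3, 8, 8),
#     "labels": [torch.tensor(0), torch.tensor(1)],
# }
# pinned = pin_memory(batch)
# pinned["image"].is_pinned()      # True
# pinned["labels"][0].is_pinned()  # True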
|
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **need** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils import ExceptionWrapper
from . import MP_STATUS_CHECK_INTERVAL
def _pin_memory_loop(in_queue, out_queue, device_id, done_event, device):
# This setting is thread local, and prevents the copy in pin_memory from
# consuming all CPU cores.
torch.set_num_threads(1)
torch.multiprocessing._set_thread_name("pt_data_pin")
torch.accelerator.set_device_index(device_id)
def do_one_step():
try:
r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
return
idx, data = r
if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
try:
data = pin_memory(data, device)
except Exception:
data = ExceptionWrapper(
where=f"in pin memory thread for device {device_id}"
)
r = (idx, data)
while not done_event.is_set():
try:
out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL)
break
except queue.Full:
continue
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
while not done_event.is_set():
# Make sure that we don't preserve any object from one iteration
# to the next
do_one_step()
def pin_memory(data, device=None):
if isinstance(data, torch.Tensor):
return data.pin_memory(device)
elif isinstance(data, (str, bytes)):
return data
elif isinstance(data, collections.abc.Mapping):
try:
if isinstance(data, collections.abc.MutableMapping):
                # The mapping type may have extra properties, so we can't just
                # use `type(data)(...)` to create the new mapping.
                # Create a clone and update it if the mapping type is mutable.
clone = copy.copy(data)
clone.update(
{k: pin_memory(sample, device) for k, sample in data.items()}
)
return clone
else:
return type(data)({k: pin_memory(sample, device) for k, sample in data.items()}) # type: ignore[call-arg]
except TypeError:
# The mapping type may not support `copy()` / `update(mapping)`
# or `__init__(iterable)`.
return {k: pin_memory(sample, device) for k, sample in data.items()}
elif isinstance(data, tuple) and hasattr(data, "_fields"): # namedtuple
return type(data)(*(pin_memory(sample, device) for sample in data))
elif isinstance(data, tuple):
return [
pin_memory(sample, device) for sample in data
] # Backwards compatibility.
elif isinstance(data, collections.abc.Sequence):
try:
if isinstance(data, collections.abc.MutableSequence):
# The sequence type may have extra properties, so we can't just
# use `type(data)(...)` to create the new sequence.
# Create a clone and update it if the sequence type is mutable.
clone = copy.copy(data) # type: ignore[arg-type]
for i, item in enumerate(data):
clone[i] = pin_memory(item, device)
return clone
return type(data)([pin_memory(sample, device) for sample in data]) # type: ignore[call-arg]
except TypeError:
# The sequence type may not support `copy()` / `__setitem__(index, item)`
# or `__init__(iterable)` (e.g., `range`).
return [pin_memory(sample, device) for sample in data]
elif hasattr(data, "pin_memory"):
return data.pin_memory()
else:
return data
|
from .basic import BasicTextNormalizer as BasicTextNormalizer
from .english import EnglishTextNormalizer as EnglishTextNormalizer
|
from .basic import BasicTextNormalizer
from .english import EnglishTextNormalizer
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
help='Pad the input image, the minimum size that is divisible '
'by size_divisor, -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
ori_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
            format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != ori_shape:
        print(f'{split_line}\nInput shape padded from {ori_shape} to '
              f'{input_shape} to be divisible by the size divisor\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
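# Example invocation (hedged: the config path below is illustrative and must
# point at an existing mmdet config file):
#   python get_flops.py configs/retinanet/retinanet_r50_fpn_1x_coco.py --shape 1280 800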
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
help='Pad the input image, the minimum size that is divisible '
'by size_divisor, -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
orig_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
            format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != orig_shape:
        print(f'{split_line}\nInput shape padded from {orig_shape} to '
              f'{input_shape} to be divisible by the size divisor\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import uuid
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, "v7/docker-compose.yml"))
compose_yml_v8 = os.path.abspath(os.path.join(cur_dir, "v8/docker-compose.yml"))
@pytest.fixture(scope="module", autouse=True)
def start_storage_v7():
os.system(f"docker compose -f {compose_yml_v7} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker compose -f {compose_yml_v7} down --remove-orphans")
@pytest.fixture(scope="module", autouse=True)
def start_storage_v8():
os.system(f"docker compose -f {compose_yml_v8} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker compose -f {compose_yml_v8} down --remove-orphans")
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts="http://localhost:9200/")
while not es.ping():
time.sleep(0.5)
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class FlatDoc(BaseDoc):
tens_one: NdArray = Field(dims=10)
tens_two: NdArray = Field(dims=50)
class NestedDoc(BaseDoc):
d: SimpleDoc
class DeepNestedDoc(BaseDoc):
d: NestedDoc
class MyImageDoc(ImageDoc):
embedding: NdArray = Field(dims=128)
@pytest.fixture(scope="function")
def ten_simple_docs():
return [SimpleDoc(tens=np.random.randn(10)) for _ in range(10)]
@pytest.fixture(scope="function")
def ten_flat_docs():
return [
FlatDoc(tens_one=np.random.randn(10), tens_two=np.random.randn(50))
for _ in range(10)
]
@pytest.fixture(scope="function")
def ten_nested_docs():
return [NestedDoc(d=SimpleDoc(tens=np.random.randn(10))) for _ in range(10)]
@pytest.fixture(scope="function")
def ten_deep_nested_docs():
return [
DeepNestedDoc(d=NestedDoc(d=SimpleDoc(tens=np.random.randn(10))))
for _ in range(10)
]
@pytest.fixture(scope="function")
def tmp_index_name():
return uuid.uuid4().hex
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import uuid
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, 'v7/docker-compose.yml'))
compose_yml_v8 = os.path.abspath(os.path.join(cur_dir, 'v8/docker-compose.yml'))
@pytest.fixture(scope='module', autouse=True)
def start_storage_v7():
os.system(f"docker-compose -f {compose_yml_v7} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v7} down --remove-orphans")
@pytest.fixture(scope='module', autouse=True)
def start_storage_v8():
os.system(f"docker-compose -f {compose_yml_v8} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v8} down --remove-orphans")
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class FlatDoc(BaseDoc):
tens_one: NdArray = Field(dims=10)
tens_two: NdArray = Field(dims=50)
class NestedDoc(BaseDoc):
d: SimpleDoc
class DeepNestedDoc(BaseDoc):
d: NestedDoc
class MyImageDoc(ImageDoc):
embedding: NdArray = Field(dims=128)
@pytest.fixture(scope='function')
def ten_simple_docs():
return [SimpleDoc(tens=np.random.randn(10)) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_flat_docs():
return [
FlatDoc(tens_one=np.random.randn(10), tens_two=np.random.randn(50))
for _ in range(10)
]
@pytest.fixture(scope='function')
def ten_nested_docs():
return [NestedDoc(d=SimpleDoc(tens=np.random.randn(10))) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_deep_nested_docs():
return [
DeepNestedDoc(d=NestedDoc(d=SimpleDoc(tens=np.random.randn(10))))
for _ in range(10)
]
@pytest.fixture(scope='function')
def tmp_index_name():
return uuid.uuid4().hex
|
import pickle
from dataclasses import dataclass
from io import BufferedIOBase
from typing import Any
import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION
__all__: list[str] = []
@dataclass
class _Entry:
key: str
is_storage: bool
length: int
_weights_only_unpickler._add_safe_globals([_Entry])
class _PseudoZipFile:
def __init__(self) -> None:
self.records: dict[str, tuple[object, int]] = {}
def write_record(self, key: str, data: object, length: int) -> None:
self.records[key] = (data, length)
def write_to(self, f: BufferedIOBase) -> None:
entries = []
for key, (data, length) in self.records.items():
entries.append(
_Entry(
key=key,
is_storage=isinstance(data, torch.UntypedStorage),
length=length,
)
)
pickle.dump(entries, f, protocol=DEFAULT_PROTOCOL)
for key, (data, length) in self.records.items():
if isinstance(data, bytes):
f.write(data)
elif isinstance(data, str):
f.write(data.encode("utf-8"))
elif isinstance(data, torch.UntypedStorage):
data._write_file(f, False, False, 1)
else:
raise TypeError(f"unknown type: {type(data)}")
def read_from(self, f: BufferedIOBase) -> None:
entries = _weights_only_unpickler.load(f)
for entry in entries:
data = f.read(entry.length)
if entry.is_storage:
storage = torch.frombuffer(
data,
dtype=torch.uint8,
).untyped_storage()
self.records[entry.key] = (
storage,
entry.length,
)
else:
self.records[entry.key] = (data, entry.length)
def has_record(self, key: str) -> bool:
return key in self.records
def get_record(self, key: str) -> object:
return self.records[key][0]
def get_storage_from_record(
self, key: str, _length: int, _type: int
) -> torch.Tensor:
return torch.tensor(self.records[key][0], dtype=torch.uint8)
def serialization_id(self) -> str:
return "torchft"
def _streaming_save(
obj: object,
f: BufferedIOBase,
pickle_module: Any = pickle,
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Save the object to a file-like object in a streaming fashion compatible with
network sockets.
This behaves similarly to :func:`torch.save` with a few notable differences:
    * A non-seekable file-like object can be used when loading.
* No forwards/backwards compatibility is provided for the serialization
format. This is only intended to be used with a single version of PyTorch
with transient storage (i.e. sockets or temp files).
* mmap is not supported
See :func:`torch.save` for more details on specific arguments.
"""
zip_file = _PseudoZipFile()
_save(
obj,
zip_file=zip_file,
pickle_module=pickle_module,
pickle_protocol=pickle_protocol,
_disable_byteorder_record=False,
)
zip_file.write_to(f)
def _streaming_load(
f: BufferedIOBase,
map_location: MAP_LOCATION = None,
pickle_module: Any = None,
*,
weights_only: bool = True,
**pickle_load_args: Any,
) -> object:
"""
Load the object from a file-like object in a streaming fashion compatible with
network sockets.
See :func:`_streaming_save` for more details about the streaming behavior.
See :func:`torch.load` for more details on specific arguments.
"""
if weights_only:
if pickle_module is not None:
raise RuntimeError(
"Can not safely load weights when explicit pickle_module is specified"
)
pickle_module = _weights_only_unpickler
else:
if pickle_module is None:
pickle_module = pickle
if "encoding" not in pickle_load_args.keys():
pickle_load_args["encoding"] = "utf-8"
zip_file = _PseudoZipFile()
zip_file.read_from(f)
return _load(
zip_file=zip_file,
map_location=map_location,
pickle_module=pickle_module,
**pickle_load_args,
)
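# --- Hedged usage sketch (not part of the original module) ---
# Round trip through an in-memory buffer: _streaming_save writes the pseudo-zip
# records sequentially, and _streaming_load reads them back without seeking.
if __name__ == "__main__":
    import io

    buffer = io.BytesIO()
    _streaming_save({"weight": torch.arange(4, dtype=torch.float32)}, buffer)
    buffer.seek(0)
    restored = _streaming_load(buffer)
    print(restored["weight"])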
|
import pickle
from dataclasses import dataclass
from io import BufferedIOBase
from typing import Any
import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION
__all__: list[str] = []
@dataclass
class _Entry:
key: str
is_storage: bool
length: int
_weights_only_unpickler._add_safe_globals([_Entry])
class _PseudoZipFile:
def __init__(self) -> None:
self.records: dict[str, tuple[object, int]] = {}
def write_record(self, key: str, data: object, length: int) -> None:
self.records[key] = (data, length)
def write_to(self, f: BufferedIOBase) -> None:
entries = []
for key, (data, length) in self.records.items():
entries.append(
_Entry(
key=key,
is_storage=isinstance(data, torch.UntypedStorage),
length=length,
)
)
pickle.dump(entries, f, protocol=DEFAULT_PROTOCOL)
for key, (data, length) in self.records.items():
if isinstance(data, bytes):
f.write(data)
elif isinstance(data, str):
f.write(data.encode("utf-8"))
elif isinstance(data, torch.UntypedStorage):
data._write_file(f, False, False, 1)
else:
raise TypeError(f"unknown type: {type(data)}")
def read_from(self, f: BufferedIOBase) -> None:
entries = _weights_only_unpickler.load(f)
for entry in entries:
data = f.read(entry.length)
if entry.is_storage:
storage = torch.frombuffer(
data,
dtype=torch.uint8,
).untyped_storage()
self.records[entry.key] = (
storage,
entry.length,
)
else:
self.records[entry.key] = (data, entry.length)
def has_record(self, key: str) -> bool:
return key in self.records
def get_record(self, key: str) -> object:
return self.records[key][0]
def get_storage_from_record(
self, key: str, _length: int, _type: int
) -> torch.Tensor:
return torch.tensor(self.records[key][0], dtype=torch.uint8)
def serialization_id(self) -> str:
return "torchft"
def _streaming_save(
obj: object,
f: BufferedIOBase,
pickle_module: Any = pickle,
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Save the object to a file-like object in a streaming fashion compatible with
network sockets.
This behaves similarly to :func:`torch.save` with a few notable differences:
    * A non-seekable file-like object can be used when loading.
    * No forwards/backwards compatibility is provided for the serialization
format. This is only intended to be used with a single version of PyTorch
with transient storage (i.e. sockets or temp files).
* mmap is not supported
See :func:`torch.save` for more details on specific arguments.
"""
zip_file = _PseudoZipFile()
_save(
obj,
zip_file=zip_file,
pickle_module=pickle_module,
pickle_protocol=pickle_protocol,
_disable_byteorder_record=False,
)
zip_file.write_to(f)
def _streaming_load(
f: BufferedIOBase,
map_location: MAP_LOCATION = None,
pickle_module: Any = None,
*,
weights_only: bool = True,
**pickle_load_args: Any,
) -> object:
"""
Load the object from a file-like object in a streaming fashion compatible with
network sockets.
See :func:`_streaming_save` for more details about the streaming behavior.
See :func:`torch.load` for more details on specific arguments.
"""
if weights_only:
if pickle_module is not None:
raise RuntimeError(
"Can not safely load weights when explicit pickle_module is specified"
)
pickle_module = _weights_only_unpickler
else:
if pickle_module is None:
pickle_module = pickle
if "encoding" not in pickle_load_args.keys():
pickle_load_args["encoding"] = "utf-8"
zip_file = _PseudoZipFile()
zip_file.read_from(f)
return _load(
zip_file=zip_file,
map_location=map_location,
pickle_module=pickle_module,
**pickle_load_args,
)
|
from fastapi import FastAPI
app = FastAPI(swagger_ui_parameters={"syntaxHighlight": {"theme": "obsidian"}})
@app.get("/users/{username}")
async def read_user(username: str):
return {"message": f"Hello {username}"}
|
from fastapi import FastAPI
app = FastAPI(swagger_ui_parameters={"syntaxHighlight.theme": "obsidian"})
@app.get("/users/{username}")
async def read_user(username: str):
return {"message": f"Hello {username}"}
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
SimCSE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_simcse_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilroberta-base"
train_batch_size = 128
max_seq_length = 32
num_epochs = 1
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_simcse{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_samples = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
logging.info("Train sentences: {}".format(len(train_samples)))
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
SimCSE will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_simcse_from_file.py path/to/sentences.txt
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, InputExample
import logging
from datetime import datetime
import gzip
import sys
import tqdm
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilroberta-base"
train_batch_size = 128
max_seq_length = 32
num_epochs = 1
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_simcse{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_samples = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
logging.info("Train sentences: {}".format(len(train_samples)))
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_graph(sys.getrecursionlimit, can_constant_fold_through=True)
def getrecursionlimit() -> int:
return sys.getrecursionlimit()
if hasattr(sys, "get_int_max_str_digits"):
@substitute_in_graph(sys.get_int_max_str_digits, can_constant_fold_through=True)
def get_int_max_str_digits() -> int:
return sys.get_int_max_str_digits()
__all__ += ["get_int_max_str_digits"]
|
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_graph(sys.getrecursionlimit, can_constant_fold_through=True)
def getrecursionlimit() -> int:
return sys.getrecursionlimit()
|
"""
This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEF1Evaluator, CESoftmaxAccuracyEvaluator
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.readers import InputExample
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# As dataset, we use SNLI + MultiNLI
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
# Read the AllNLI.tsv.gz file and create the training dataset
logger.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row["label"]]
if row["split"] == "train":
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
else:
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
model = CrossEncoder("distilroberta-base", num_labels=len(label2int))
# We wrap train_samples, which is a list of InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# During training, we use CESoftmaxAccuracyEvaluator and CEF1Evaluator to measure the performance on the dev set
accuracy_evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name="AllNLI-dev")
f1_evaluator = CEF1Evaluator.from_input_examples(dev_samples, name="AllNLI-dev")
evaluator = SequentialEvaluator([accuracy_evaluator, f1_evaluator])
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
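# --- Hedged inference sketch (kept as a comment; runs once training has written
# model_save_path). predict() scores raw sentence pairs; with num_labels=3 it
# returns one row of logits per pair, indexed as contradiction/entailment/neutral.
# trained_model = CrossEncoder(model_save_path)
# scores = trained_model.predict([["A man is eating food.", "A man is eating something."]])
# print(scores)  # shape (1, 3)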
|
"""
This examples trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CESoftmaxAccuracyEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)
#### /print debug information to stdout
#As dataset, we use SNLI + MultiNLI
#Check if dataset exists. If not, download and extract it
nli_dataset_path = 'datasets/AllNLI.tsv.gz'
if not os.path.exists(nli_dataset_path):
util.http_get('https://sbert.net/datasets/AllNLI.tsv.gz', nli_dataset_path)
# Read the AllNLI.tsv.gz file and create the training dataset
logger.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row['label']]
if row['split'] == 'train':
train_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=label_id))
else:
dev_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=label_id))
train_batch_size = 16
num_epochs = 4
model_save_path = 'output/training_allnli-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
#Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
model = CrossEncoder('distilroberta-base', num_labels=len(label2int))
#We wrap train_samples, which is a list of InputExample, in a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
#During training, we use CESoftmaxAccuracyEvaluator to measure the accuracy on the dev set.
evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name='AllNLI-dev')
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) #10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path)
|
"""
This is a simple application for sparse encoder: Computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print the embedding dimension and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.66666793823242, 'sparsity_ratio': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# decode() returns, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
"""
This is a simple application for sparse encoder: Computing embeddings.
We have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
We can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print the embedding dimension and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.get_sparsity_stats(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats['row_sparsity_mean']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# decode() returns, for each sentence, a list of (token, weight) pairs
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.get_sparsity_stats(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats_limited['row_sparsity_mean']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 32.0, 'row_sparsity_mean': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
import pytest
from mmengine.registry import DefaultScope
class TestDefaultScope:
def test_scope(self):
default_scope = DefaultScope.get_instance('name1', scope_name='mmdet')
assert default_scope.scope_name == 'mmdet'
# `DefaultScope.get_instance` must have `scope_name` argument.
with pytest.raises(TypeError):
DefaultScope.get_instance('name2')
def test_get_current_instance(self):
DefaultScope._instance_dict = OrderedDict()
assert DefaultScope.get_current_instance() is None
DefaultScope.get_instance('instance_name', scope_name='mmengine')
default_scope = DefaultScope.get_current_instance()
assert default_scope.scope_name == 'mmengine'
def test_overwrite_default_scope(self):
origin_scope = DefaultScope.get_instance(
'test_overwrite_default_scope', scope_name='origin_scope')
with DefaultScope.overwrite_default_scope(scope_name=None):
assert DefaultScope.get_current_instance(
).scope_name == 'origin_scope'
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
assert DefaultScope.get_current_instance(
).scope_name == 'test_overwrite'
assert DefaultScope.get_current_instance(
).scope_name == origin_scope.scope_name == 'origin_scope'
        # Test overwriting the default scope immediately.
        # Test sequential overwrites.
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
pass
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
pass
# Test nested overwrite.
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
with DefaultScope.overwrite_default_scope(
scope_name='test_overwrite'):
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
import pytest
from mmengine.registry import DefaultScope
class TestDefaultScope:
def test_scope(self):
default_scope = DefaultScope.get_instance('name1', scope_name='mmdet')
assert default_scope.scope_name == 'mmdet'
# `DefaultScope.get_instance` must have `scope_name` argument.
with pytest.raises(TypeError):
DefaultScope.get_instance('name2')
def test_get_current_instance(self):
DefaultScope._instance_dict = OrderedDict()
assert DefaultScope.get_current_instance() is None
DefaultScope.get_instance('instance_name', scope_name='mmengine')
default_scope = DefaultScope.get_current_instance()
assert default_scope.scope_name == 'mmengine'
def test_overwrite_default_scope(self):
origin_scope = DefaultScope.get_instance(
'test_overwrite_default_scope', scope_name='origin_scope')
with DefaultScope.overwrite_default_scope(scope_name=None):
assert DefaultScope.get_current_instance(
).scope_name == 'origin_scope'
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
assert DefaultScope.get_current_instance(
).scope_name == 'test_overwrite'
assert DefaultScope.get_current_instance(
).scope_name == origin_scope.scope_name == 'origin_scope'
|
"""Test PandasDataframeParser"""
from typing import Any
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
"steak": [9, 8, 7, 6],
},
)
parser = PandasDataFrameOutputParser(dataframe=df)
# Test Invalid Column
def test_pandas_output_parser_col_no_array() -> None:
try:
parser.parse("column:num_legs")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with invalid array (above DataFrame max index)
def test_pandas_output_parser_col_oob() -> None:
try:
parser.parse("row:10")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with array [x]
def test_pandas_output_parser_col_first_elem() -> None:
expected_output = {"chicken": 1}
actual_output = parser.parse("column:chicken[0]")
assert actual_output == expected_output
# Test Column with array [x,y,z]
def test_pandas_output_parser_col_multi_elem() -> None:
expected_output = {"chicken": pd.Series([1, 2], name="chicken", dtype="int64")}
actual_output = parser.parse("column:chicken[0, 1]")
for key in actual_output:
assert expected_output["chicken"].equals(actual_output[key])
# Test Row with invalid row entry
def test_pandas_output_parser_row_no_array() -> None:
try:
parser.parse("row:5")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid row entry
def test_pandas_output_parser_row_first() -> None:
expected_output = {"1": pd.Series({"chicken": 2, "veggies": 4, "steak": 8})}
actual_output = parser.parse("row:1")
assert actual_output["1"].equals(expected_output["1"])
# Test Row with invalid col entry
def test_pandas_output_parser_row_no_column() -> None:
try:
parser.parse("row:1[num_legs]")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid col entry
def test_pandas_output_parser_row_col_1() -> None:
expected_output = {"1": 2}
actual_output = parser.parse("row:1[chicken]")
assert actual_output == expected_output
def test_pandas_output_parser_special_ops() -> None:
actual_output = [
{"mean": 3.0},
{"median": 3.0},
{"min": 2},
{"max": 4},
{"var": 1.0},
{"std": 1.0},
{"count": 3},
{"quantile": 3.0},
]
expected_output = [
parser.parse("mean:chicken[1..3]"),
parser.parse("median:chicken[1..3]"),
parser.parse("min:chicken[1..3]"),
parser.parse("max:chicken[1..3]"),
parser.parse("var:chicken[1..3]"),
parser.parse("std:chicken[1..3]"),
parser.parse("count:chicken[1..3]"),
parser.parse("quantile:chicken[1..3]"),
]
assert actual_output == expected_output
def test_pandas_output_parser_invalid_special_op() -> None:
try:
parser.parse("riemann_sum:chicken")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
def test_pandas_output_parser_output_type() -> None:
"""Test the output type of the pandas dataframe output parser is a pandas dataframe.""" # noqa: E501
assert parser.OutputType == dict[str, Any]
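# Recap of the query formats exercised above (the values restate the test
# expectations; this note is an illustrative addition, not part of the tests):
#   "column:chicken[0]"   -> {"chicken": 1}
#   "row:1[chicken]"      -> {"1": 2}
#   "mean:chicken[1..3]"  -> {"mean": 3.0}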
|
"""Test PandasDataframeParser"""
from typing import Any
import pandas as pd
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.pandas_dataframe import PandasDataFrameOutputParser
df = pd.DataFrame(
{
"chicken": [1, 2, 3, 4],
"veggies": [5, 4, 3, 2],
"steak": [9, 8, 7, 6],
}
)
parser = PandasDataFrameOutputParser(dataframe=df)
# Test Invalid Column
def test_pandas_output_parser_col_no_array() -> None:
try:
parser.parse("column:num_legs")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with invalid array (above DataFrame max index)
def test_pandas_output_parser_col_oob() -> None:
try:
parser.parse("row:10")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Column with array [x]
def test_pandas_output_parser_col_first_elem() -> None:
expected_output = {"chicken": 1}
actual_output = parser.parse("column:chicken[0]")
assert actual_output == expected_output
# Test Column with array [x,y,z]
def test_pandas_output_parser_col_multi_elem() -> None:
expected_output = {"chicken": pd.Series([1, 2], name="chicken", dtype="int64")}
actual_output = parser.parse("column:chicken[0, 1]")
for key in actual_output:
assert expected_output["chicken"].equals(actual_output[key])
# Test Row with invalid row entry
def test_pandas_output_parser_row_no_array() -> None:
try:
parser.parse("row:5")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid row entry
def test_pandas_output_parser_row_first() -> None:
expected_output = {"1": pd.Series({"chicken": 2, "veggies": 4, "steak": 8})}
actual_output = parser.parse("row:1")
assert actual_output["1"].equals(expected_output["1"])
# Test Row with invalid col entry
def test_pandas_output_parser_row_no_column() -> None:
try:
parser.parse("row:1[num_legs]")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
# Test Row with valid col entry
def test_pandas_output_parser_row_col_1() -> None:
expected_output = {"1": 2}
actual_output = parser.parse("row:1[chicken]")
assert actual_output == expected_output
def test_pandas_output_parser_special_ops() -> None:
actual_output = [
{"mean": 3.0},
{"median": 3.0},
{"min": 2},
{"max": 4},
{"var": 1.0},
{"std": 1.0},
{"count": 3},
{"quantile": 3.0},
]
expected_output = [
parser.parse("mean:chicken[1..3]"),
parser.parse("median:chicken[1..3]"),
parser.parse("min:chicken[1..3]"),
parser.parse("max:chicken[1..3]"),
parser.parse("var:chicken[1..3]"),
parser.parse("std:chicken[1..3]"),
parser.parse("count:chicken[1..3]"),
parser.parse("quantile:chicken[1..3]"),
]
assert actual_output == expected_output
def test_pandas_output_parser_invalid_special_op() -> None:
try:
parser.parse("riemann_sum:chicken")
assert False, "Should have raised OutputParserException"
except OutputParserException:
assert True
def test_pandas_output_parser_output_type() -> None:
"""Test the output type of the pandas dataframe output parser is a pandas dataframe.""" # noqa: E501
assert parser.OutputType == dict[str, Any]
|
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
# TODO: find a better way to collect image_level_labels
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances', 'image_level_labels'))
]
train_dataloader = dict(
batch_size=2,
    num_workers=0,  # workers_per_gpu > 0 may cause out-of-memory errors
persistent_workers=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=0,
persistent_workers=False,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/validation-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/validation/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/validation-image-metas.pkl',
image_level_ann_file='annotations/validation-'
'annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='OpenImagesMetric',
iou_thrs=0.5,
ioa_thrs=0.5,
use_group_of=True,
get_supercategory=True)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1024, 800), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
# TODO: find a better way to collect image_level_labels
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'instances', 'image_level_labels'))
]
train_dataloader = dict(
batch_size=2,
    num_workers=0,  # workers_per_gpu > 0 may cause out-of-memory errors
persistent_workers=False,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/train-image-metas.pkl',
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=0,
persistent_workers=False,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/validation-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/validation/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/validation-image-metas.pkl',
image_level_ann_file='annotations/validation-'
'annotations-human-imagelabels-boxable.csv',
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='OpenImagesMetric',
iou_thrs=0.5,
ioa_thrs=0.5,
use_group_of=True,
get_supercategory=True)
test_evaluator = val_evaluator
|
from typing import Dict
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
Args:
dataloader (DataLoader): the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
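if __name__ == "__main__":
    # Minimal usage sketch (an illustrative addition, not part of the evaluator).
    # The checkpoint name, sentence pairs and labels are placeholders; the
    # SoftmaxLoss head plays the role of the `softmax_model` expected above.
    from sentence_transformers import InputExample, losses

    model = SentenceTransformer("all-MiniLM-L6-v2")
    examples = [
        InputExample(texts=["A man is eating food.", "A man is eating."], label=1),
        InputExample(texts=["A man is eating food.", "A plane is flying."], label=0),
    ]
    softmax_model = losses.SoftmaxLoss(
        model=model,
        sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
        num_labels=2,
    )
    dev_dataloader = DataLoader(examples, shuffle=False, batch_size=2)
    evaluator = LabelAccuracyEvaluator(dev_dataloader, name="dev", softmax_model=softmax_model)
    print(evaluator(model, output_path="."))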
|
from typing import Dict
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
import torch
from torch.utils.data import DataLoader
import logging
from ..util import batch_to_device
import os
import csv
logger = logging.getLogger(__name__)
class LabelAccuracyEvaluator(SentenceEvaluator):
"""
Evaluate a model based on its accuracy on a labeled dataset
This requires a model with LossFunction.SOFTMAX
The results are written in a CSV. If a CSV already exists, then values are appended.
"""
def __init__(self, dataloader: DataLoader, name: str = "", softmax_model=None, write_csv: bool = True):
"""
Constructs an evaluator for the given dataset
:param dataloader:
the data for the evaluation
"""
super().__init__()
self.dataloader = dataloader
self.name = name
self.softmax_model = softmax_model
if name:
name = "_" + name
self.write_csv = write_csv
self.csv_file = "accuracy_evaluation" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "accuracy"]
self.primary_metric = "accuracy"
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
model.eval()
total = 0
correct = 0
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("Evaluation on the " + self.name + " dataset" + out_txt)
self.dataloader.collate_fn = model.smart_batching_collate
for step, batch in enumerate(self.dataloader):
features, label_ids = batch
for idx in range(len(features)):
features[idx] = batch_to_device(features[idx], model.device)
label_ids = label_ids.to(model.device)
with torch.no_grad():
_, prediction = self.softmax_model(features, labels=None)
total += prediction.size(0)
correct += torch.argmax(prediction, dim=1).eq(label_ids).sum().item()
accuracy = correct / total
logger.info("Accuracy: {:.4f} ({}/{})\n".format(accuracy, correct, total))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
if not os.path.isfile(csv_path):
with open(csv_path, newline="", mode="w", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, accuracy])
else:
with open(csv_path, newline="", mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow([epoch, steps, accuracy])
metrics = {"accuracy": accuracy}
metrics = self.prefix_name_to_metrics(metrics, self.name)
self.store_metrics_in_model_card_data(model, metrics)
return metrics
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine.fileio import dump, list_from_file
from mmengine.utils import mkdir_or_exist, scandir, track_iter_progress
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco format without annotations')
parser.add_argument('img_path', help='The root path of images')
parser.add_argument(
'classes', type=str, help='The text file name of storage class list')
parser.add_argument(
'out',
type=str,
        help='The output annotation json file name. The save dir is in the '
        'same directory as img_path')
parser.add_argument(
'-e',
'--exclude-extensions',
type=str,
nargs='+',
help='The suffix of images to be excluded, such as "png" and "bmp"')
args = parser.parse_args()
return args
def collect_image_infos(path, exclude_extensions=None):
img_infos = []
images_generator = scandir(path, recursive=True)
for image_path in track_iter_progress(list(images_generator)):
if exclude_extensions is None or (
exclude_extensions is not None
and not image_path.lower().endswith(exclude_extensions)):
image_path = os.path.join(path, image_path)
img_pillow = Image.open(image_path)
img_info = {
'filename': image_path,
'width': img_pillow.width,
'height': img_pillow.height,
}
img_infos.append(img_info)
return img_infos
def cvt_to_coco_json(img_infos, classes):
image_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
for category_id, name in enumerate(classes):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for img_dict in img_infos:
file_name = img_dict['filename']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(img_dict['height'])
image_item['width'] = int(img_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
image_id += 1
return coco
def main():
args = parse_args()
assert args.out.endswith(
        'json'), 'The output file name must have a json suffix'
# 1 load image list info
img_infos = collect_image_infos(args.img_path, args.exclude_extensions)
# 2 convert to coco format data
classes = list_from_file(args.classes)
coco_info = cvt_to_coco_json(img_infos, classes)
# 3 dump
save_dir = os.path.join(args.img_path, '..', 'annotations')
mkdir_or_exist(save_dir)
save_path = os.path.join(save_dir, args.out)
dump(coco_info, save_path)
print(f'save json file: {save_path}')
if __name__ == '__main__':
main()
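# Example invocation (file names below are placeholders, not from the original
# script); the classes file is expected to list one category name per line:
#   python images2coco.py data/my_images data/classes.txt unlabeled_coco.json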
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import mmcv
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco format without annotations')
parser.add_argument('img_path', help='The root path of images')
parser.add_argument(
'classes', type=str, help='The text file name of storage class list')
parser.add_argument(
'out',
type=str,
        help='The output annotation json file name. The save dir is in the '
        'same directory as img_path')
parser.add_argument(
'-e',
'--exclude-extensions',
type=str,
nargs='+',
help='The suffix of images to be excluded, such as "png" and "bmp"')
args = parser.parse_args()
return args
def collect_image_infos(path, exclude_extensions=None):
img_infos = []
images_generator = mmcv.scandir(path, recursive=True)
for image_path in mmcv.track_iter_progress(list(images_generator)):
if exclude_extensions is None or (
exclude_extensions is not None
and not image_path.lower().endswith(exclude_extensions)):
image_path = os.path.join(path, image_path)
img_pillow = Image.open(image_path)
img_info = {
'filename': image_path,
'width': img_pillow.width,
'height': img_pillow.height,
}
img_infos.append(img_info)
return img_infos
def cvt_to_coco_json(img_infos, classes):
image_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
for category_id, name in enumerate(classes):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for img_dict in img_infos:
file_name = img_dict['filename']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(img_dict['height'])
image_item['width'] = int(img_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
image_id += 1
return coco
def main():
args = parse_args()
assert args.out.endswith(
        'json'), 'The output file name must have a json suffix'
# 1 load image list info
img_infos = collect_image_infos(args.img_path, args.exclude_extensions)
# 2 convert to coco format data
classes = mmcv.list_from_file(args.classes)
coco_info = cvt_to_coco_json(img_infos, classes)
# 3 dump
save_dir = os.path.join(args.img_path, '..', 'annotations')
mmcv.mkdir_or_exist(save_dir)
save_path = os.path.join(save_dir, args.out)
mmcv.dump(coco_info, save_path)
print(f'save json file: {save_path}')
if __name__ == '__main__':
main()
|
from pydantic import BaseModel
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchema,
BlockWebhookConfig,
)
from backend.data.model import SchemaField
from backend.integrations.providers import ProviderName
from backend.util import settings
from backend.util.settings import AppEnvironment, BehaveAs
from ._api import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
Slant3DCredentialsField,
Slant3DCredentialsInput,
)
class Slant3DTriggerBase:
"""Base class for Slant3D webhook triggers"""
class Input(BlockSchema):
credentials: Slant3DCredentialsInput = Slant3DCredentialsField()
# Webhook URL is handled by the webhook system
payload: dict = SchemaField(hidden=True, default={})
class Output(BlockSchema):
payload: dict = SchemaField(
description="The complete webhook payload received from Slant3D"
)
order_id: str = SchemaField(description="The ID of the affected order")
error: str = SchemaField(
description="Error message if payload processing failed"
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "payload", input_data.payload
yield "order_id", input_data.payload["orderId"]
class Slant3DOrderWebhookBlock(Slant3DTriggerBase, Block):
"""Block for handling Slant3D order webhooks"""
class Input(Slant3DTriggerBase.Input):
class EventsFilter(BaseModel):
"""
Currently Slant3D only supports 'SHIPPED' status updates
Could be expanded in the future with more status types
"""
shipped: bool = True
events: EventsFilter = SchemaField(
title="Events",
description="Order status events to subscribe to",
default=EventsFilter(shipped=True),
)
class Output(Slant3DTriggerBase.Output):
status: str = SchemaField(description="The new status of the order")
tracking_number: str = SchemaField(
description="The tracking number for the shipment"
)
carrier_code: str = SchemaField(description="The carrier code (e.g., 'usps')")
def __init__(self):
super().__init__(
id="8a74c2ad-0104-4640-962f-26c6b69e58cd",
description=(
"This block triggers on Slant3D order status updates and outputs "
"the event details, including tracking information when orders are shipped."
),
            # All webhooks are currently subscribed to for all orders. This works for self-hosted, but not for cloud-hosted prod.
disabled=(
settings.Settings().config.behave_as == BehaveAs.CLOUD
and settings.Settings().config.app_env != AppEnvironment.LOCAL
),
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
webhook_config=BlockWebhookConfig(
provider=ProviderName.SLANT3D,
webhook_type="orders", # Only one type for now
resource_format="", # No resource format needed
event_filter_input="events",
event_format="order.{event}",
),
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"events": {"shipped": True},
"payload": {
"orderId": "1234567890",
"status": "SHIPPED",
"trackingNumber": "ABCDEF123456",
"carrierCode": "usps",
},
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"payload",
{
"orderId": "1234567890",
"status": "SHIPPED",
"trackingNumber": "ABCDEF123456",
"carrierCode": "usps",
},
),
("order_id", "1234567890"),
("status", "SHIPPED"),
("tracking_number", "ABCDEF123456"),
("carrier_code", "usps"),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
yield from super().run(input_data, **kwargs)
# Extract and normalize values from the payload
yield "status", input_data.payload["status"]
yield "tracking_number", input_data.payload["trackingNumber"]
yield "carrier_code", input_data.payload["carrierCode"]
|
from pydantic import BaseModel
from backend.data.block import (
Block,
BlockCategory,
BlockOutput,
BlockSchema,
BlockWebhookConfig,
)
from backend.data.model import SchemaField
from backend.util import settings
from backend.util.settings import AppEnvironment, BehaveAs
from ._api import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
Slant3DCredentialsField,
Slant3DCredentialsInput,
)
class Slant3DTriggerBase:
"""Base class for Slant3D webhook triggers"""
class Input(BlockSchema):
credentials: Slant3DCredentialsInput = Slant3DCredentialsField()
# Webhook URL is handled by the webhook system
payload: dict = SchemaField(hidden=True, default={})
class Output(BlockSchema):
payload: dict = SchemaField(
description="The complete webhook payload received from Slant3D"
)
order_id: str = SchemaField(description="The ID of the affected order")
error: str = SchemaField(
description="Error message if payload processing failed"
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "payload", input_data.payload
yield "order_id", input_data.payload["orderId"]
class Slant3DOrderWebhookBlock(Slant3DTriggerBase, Block):
"""Block for handling Slant3D order webhooks"""
class Input(Slant3DTriggerBase.Input):
class EventsFilter(BaseModel):
"""
Currently Slant3D only supports 'SHIPPED' status updates
Could be expanded in the future with more status types
"""
shipped: bool = True
events: EventsFilter = SchemaField(
title="Events",
description="Order status events to subscribe to",
default=EventsFilter(shipped=True),
)
class Output(Slant3DTriggerBase.Output):
status: str = SchemaField(description="The new status of the order")
tracking_number: str = SchemaField(
description="The tracking number for the shipment"
)
carrier_code: str = SchemaField(description="The carrier code (e.g., 'usps')")
def __init__(self):
super().__init__(
id="8a74c2ad-0104-4640-962f-26c6b69e58cd",
description=(
"This block triggers on Slant3D order status updates and outputs "
"the event details, including tracking information when orders are shipped."
),
            # All webhooks are currently subscribed to for all orders. This works for self-hosted, but not for cloud-hosted prod.
disabled=(
settings.Settings().config.behave_as == BehaveAs.CLOUD
and settings.Settings().config.app_env != AppEnvironment.LOCAL
),
categories={BlockCategory.DEVELOPER_TOOLS},
input_schema=self.Input,
output_schema=self.Output,
webhook_config=BlockWebhookConfig(
provider="slant3d",
webhook_type="orders", # Only one type for now
resource_format="", # No resource format needed
event_filter_input="events",
event_format="order.{event}",
),
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"events": {"shipped": True},
"payload": {
"orderId": "1234567890",
"status": "SHIPPED",
"trackingNumber": "ABCDEF123456",
"carrierCode": "usps",
},
},
test_credentials=TEST_CREDENTIALS,
test_output=[
(
"payload",
{
"orderId": "1234567890",
"status": "SHIPPED",
"trackingNumber": "ABCDEF123456",
"carrierCode": "usps",
},
),
("order_id", "1234567890"),
("status", "SHIPPED"),
("tracking_number", "ABCDEF123456"),
("carrier_code", "usps"),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput: # type: ignore
yield from super().run(input_data, **kwargs)
# Extract and normalize values from the payload
yield "status", input_data.payload["status"]
yield "tracking_number", input_data.payload["trackingNumber"]
yield "carrier_code", input_data.payload["carrierCode"]
|
#!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from pathlib import Path
import torch
from utils import create_tsv, dump_features, get_km_label, learn_kmeans
def _init_logger(debug=False):
message_fmt = "%(levelname)5s: %(funcName)10s: %(message)s" if debug else "%(message)s"
logging.basicConfig(
level=logging.DEBUG if debug else logging.INFO,
format=f"%(asctime)s: {message_fmt}",
)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter,
)
parser.add_argument("--debug", action="store_true", help="Enable debug log")
parser.add_argument("--dataset", default="librispeech", type=str, choices=["librispeech", "librilight"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``LibriSpeech`` or ``LibriLight`` is stored.",
)
parser.add_argument("--num-rank", default=5, type=int)
parser.add_argument("--feat-type", default="mfcc", choices=["mfcc", "hubert"], type=str)
parser.add_argument(
"--layer-index",
default=6,
type=int,
help="The layer index in HuBERT model for feature extraction. (``1`` means the first layer output)",
)
parser.add_argument(
"--checkpoint-path",
default=None,
type=Path,
help="The model checkpoint of hubert_pretrain_base model.",
)
parser.add_argument("--use-gpu", default=False, type=bool)
parser.add_argument(
"--exp-dir",
type=Path,
help="The directory to store the experiment outputs.",
)
parser.add_argument(
"--num-cluster",
default=100,
type=int,
help="The number of clusters for KMeans clustering.",
)
args = parser.parse_args()
return args
def main(args):
_init_logger(args.debug)
if not args.exp_dir.exists():
args.exp_dir.mkdir()
if args.feat_type == "mfcc":
data_dir = args.exp_dir / "data" / "mfcc"
else:
data_dir = args.exp_dir / "data" / f"{args.feat_type}_{args.layer_index}"
data_dir.mkdir(parents=True, exist_ok=True)
tsv_dir = data_dir / "tsv"
feat_dir = data_dir / "feat"
km_dir = data_dir / "km_model"
label_dir = data_dir / "label"
if args.use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Create file lists for training and validation (optional)
create_tsv(args.root_dir, tsv_dir)
# Extract features for KMeans clustering
if not feat_dir.exists():
feat_dir.mkdir()
for split in ["train", "valid"]:
for rank in range(1, args.num_rank + 1):
dump_features(
tsv_dir / f"{args.dataset}_{split}.tsv",
feat_dir,
split,
rank,
args.num_rank,
device,
args.feat_type,
args.layer_index,
args.checkpoint_path,
16_000,
)
# Fit KMeans clustering model
learn_kmeans(
feat_dir,
"train",
args.num_rank,
km_dir,
args.num_cluster,
)
# Predict labels for MFCC or HuBERT features
for split in ["train", "valid"]:
get_km_label(
feat_dir,
km_dir,
label_dir,
split,
args.num_rank,
device,
)
if __name__ == "__main__":
main(_parse_args())
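# Example invocation (paths and the script name below are placeholders, not from
# the original file): first-iteration preprocessing with MFCC features.
#   python preprocess.py --root-dir /datasets --exp-dir exp/hubert_iter1 \
#       --feat-type mfcc --num-rank 5 --num-cluster 100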
|
#!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from pathlib import Path
import torch
from utils import (
create_tsv,
dump_features,
learn_kmeans,
get_km_label,
)
def _init_logger(debug=False):
message_fmt = "%(levelname)5s: %(funcName)10s: %(message)s" if debug else "%(message)s"
logging.basicConfig(
level=logging.DEBUG if debug else logging.INFO,
format=f"%(asctime)s: {message_fmt}",
)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter,
)
parser.add_argument("--debug", action="store_true", help="Enable debug log")
parser.add_argument("--dataset", default="librispeech", type=str, choices=["librispeech", "librilight"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``LibriSpeech`` or ``LibriLight`` is stored.",
)
parser.add_argument("--num-rank", default=5, type=int)
parser.add_argument("--feat-type", default="mfcc", choices=["mfcc", "hubert"], type=str)
parser.add_argument(
"--layer-index",
default=6,
type=int,
help="The layer index in HuBERT model for feature extraction. (``1`` means the first layer output)",
)
parser.add_argument(
"--checkpoint-path",
default=None,
type=Path,
help="The model checkpoint of hubert_pretrain_base model.",
)
parser.add_argument("--use-gpu", default=False, type=bool)
parser.add_argument(
"--exp-dir",
type=Path,
help="The directory to store the experiment outputs.",
)
parser.add_argument(
"--num-cluster",
default=100,
type=int,
help="The number of clusters for KMeans clustering.",
)
args = parser.parse_args()
return args
def main(args):
_init_logger(args.debug)
if not args.exp_dir.exists():
args.exp_dir.mkdir()
if args.feat_type == "mfcc":
data_dir = args.exp_dir / "data" / "mfcc"
else:
data_dir = args.exp_dir / "data" / f"{args.feat_type}_{args.layer_index}"
data_dir.mkdir(parents=True, exist_ok=True)
tsv_dir = data_dir / "tsv"
feat_dir = data_dir / "feat"
km_dir = data_dir / "km_model"
label_dir = data_dir / "label"
if args.use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Create file lists for training and validation (optional)
create_tsv(args.root_dir, tsv_dir)
# Extract features for KMeans clustering
if not feat_dir.exists():
feat_dir.mkdir()
for split in ["train", "valid"]:
for rank in range(1, args.num_rank + 1):
dump_features(
tsv_dir / f"{args.dataset}_{split}.tsv",
feat_dir,
split,
rank,
args.num_rank,
device,
args.feat_type,
args.layer_index,
args.checkpoint_path,
16_000,
)
# Fit KMeans clustering model
learn_kmeans(
feat_dir,
"train",
args.num_rank,
km_dir,
args.num_cluster,
)
# Predict labels for MFCC or HuBERT features
for split in ["train", "valid"]:
get_km_label(
feat_dir,
km_dir,
label_dir,
split,
args.num_rank,
device,
)
if __name__ == "__main__":
main(_parse_args())
|
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
""" # noqa: E501
from __future__ import annotations
from langchain_core.tools.base import (
FILTERED_ARGS as FILTERED_ARGS,
)
from langchain_core.tools.base import (
ArgsSchema as ArgsSchema,
)
from langchain_core.tools.base import (
BaseTool as BaseTool,
)
from langchain_core.tools.base import (
BaseToolkit as BaseToolkit,
)
from langchain_core.tools.base import (
InjectedToolArg as InjectedToolArg,
)
from langchain_core.tools.base import InjectedToolCallId as InjectedToolCallId
from langchain_core.tools.base import SchemaAnnotationError as SchemaAnnotationError
from langchain_core.tools.base import (
ToolException as ToolException,
)
from langchain_core.tools.base import (
_get_runnable_config_param as _get_runnable_config_param,
)
from langchain_core.tools.base import (
create_schema_from_function as create_schema_from_function,
)
from langchain_core.tools.convert import (
convert_runnable_to_tool as convert_runnable_to_tool,
)
from langchain_core.tools.convert import tool as tool
from langchain_core.tools.render import ToolsRenderer as ToolsRenderer
from langchain_core.tools.render import (
render_text_description as render_text_description,
)
from langchain_core.tools.render import (
render_text_description_and_args as render_text_description_and_args,
)
from langchain_core.tools.retriever import RetrieverInput as RetrieverInput
from langchain_core.tools.retriever import (
create_retriever_tool as create_retriever_tool,
)
from langchain_core.tools.simple import Tool as Tool
from langchain_core.tools.structured import StructuredTool as StructuredTool
|
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
RunnableSerializable --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
""" # noqa: E501
from __future__ import annotations
from langchain_core.tools.base import (
FILTERED_ARGS as FILTERED_ARGS,
)
from langchain_core.tools.base import (
BaseTool as BaseTool,
)
from langchain_core.tools.base import (
BaseToolkit as BaseToolkit,
)
from langchain_core.tools.base import (
InjectedToolArg as InjectedToolArg,
)
from langchain_core.tools.base import InjectedToolCallId as InjectedToolCallId
from langchain_core.tools.base import SchemaAnnotationError as SchemaAnnotationError
from langchain_core.tools.base import (
ToolException as ToolException,
)
from langchain_core.tools.base import (
_get_runnable_config_param as _get_runnable_config_param,
)
from langchain_core.tools.base import (
create_schema_from_function as create_schema_from_function,
)
from langchain_core.tools.convert import (
convert_runnable_to_tool as convert_runnable_to_tool,
)
from langchain_core.tools.convert import tool as tool
from langchain_core.tools.render import ToolsRenderer as ToolsRenderer
from langchain_core.tools.render import (
render_text_description as render_text_description,
)
from langchain_core.tools.render import (
render_text_description_and_args as render_text_description_and_args,
)
from langchain_core.tools.retriever import RetrieverInput as RetrieverInput
from langchain_core.tools.retriever import (
create_retriever_tool as create_retriever_tool,
)
from langchain_core.tools.simple import Tool as Tool
from langchain_core.tools.structured import StructuredTool as StructuredTool
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
def _create_worker_runtime(port, name='', executor=None):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
if protocol == 'http':
gateway_runtime = HTTPGatewayRuntime
elif protocol == 'websocket':
gateway_runtime = WebSocketGatewayRuntime
else:
gateway_runtime = GRPCGatewayRuntime
with gateway_runtime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
]
)
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.dry_run()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.dry_run()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.dry_run()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.dry_run()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import asyncio
import json
import multiprocessing
import threading
import time
from collections import defaultdict
import pytest
from jina import Client, Document, Executor, requests
from jina.enums import PollingType
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
def _create_worker_runtime(port, name='', executor=None):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
if protocol == 'http':
gateway_runtime = HTTPGatewayRuntime
elif protocol == 'websocket':
gateway_runtime = WebSocketGatewayRuntime
else:
gateway_runtime = GRPCGatewayRuntime
with gateway_runtime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
]
)
) as runtime:
runtime.run_forever()
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
# send requests to the gateway
c = Client(host='localhost', port=port, asyncio=True, protocol=protocol)
dry_run_alive = c.dry_run()
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.dry_run()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchSyncRandomResize,
DetDataPreprocessor)
__all__ = ['DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import BatchSyncRandomResize, DetDataPreprocessor
__all__ = ['DetDataPreprocessor', 'BatchSyncRandomResize']
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='PAA',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='PAAHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='PAA',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='PAAHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
"""Init file of LlamaIndex."""
__version__ = "0.12.16"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.15"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|