input (stringlengths 33–5k) | output (stringlengths 32–5k)
---|---|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that updates the metadata of the Diffusers library in the repository `huggingface/diffusers-metadata`.
Usage for an update (as used by the GitHub action `update_metadata`):
```bash
python utils/update_metadata.py
```
Script modified from:
https://github.com/huggingface/transformers/blob/main/utils/update_metadata.py
"""
import argparse
import os
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from diffusers.pipelines.auto_pipeline import (
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
AUTO_INPAINT_PIPELINES_MAPPING,
AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
)
PIPELINE_TAG_JSON = "pipeline_tags.json"
def get_supported_pipeline_table() -> dict:
"""
Generates a dictionary containing the supported auto classes for each pipeline type,
using the content of the auto modules.
"""
# All supported pipelines for automatic mapping.
all_supported_pipeline_classes = [
(class_name.__name__, "text-to-image", "AutoPipelineForText2Image")
for _, class_name in AUTO_TEXT2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForImage2Image")
for _, class_name in AUTO_IMAGE2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForInpainting")
for _, class_name in AUTO_INPAINT_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes = list(set(all_supported_pipeline_classes))
all_supported_pipeline_classes.sort(key=lambda x: x[0])
data = {}
data["pipeline_class"] = [sample[0] for sample in all_supported_pipeline_classes]
data["pipeline_tag"] = [sample[1] for sample in all_supported_pipeline_classes]
data["auto_class"] = [sample[2] for sample in all_supported_pipeline_classes]
return data
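# Illustrative shape of the returned dictionary (a sketch only; the actual
# entries depend on the installed diffusers version and its auto-pipeline mappings):
#     {
#         "pipeline_class": ["StableDiffusionImg2ImgPipeline", "StableDiffusionPipeline", ...],
#         "pipeline_tag": ["image-to-image", "text-to-image", ...],
#         "auto_class": ["AutoPipelineForImage2Image", "AutoPipelineForText2Image", ...],
#     }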
def update_metadata(commit_sha: str):
"""
Update the metadata for the Diffusers repo in `huggingface/diffusers-metadata`.
Args:
commit_sha (`str`): The commit SHA on Diffusers corresponding to this update.
"""
pipelines_table = get_supported_pipeline_table()
pipelines_table = pd.DataFrame(pipelines_table)
pipelines_dataset = Dataset.from_pandas(pipelines_table)
hub_pipeline_tags_json = hf_hub_download(
repo_id="huggingface/diffusers-metadata",
filename=PIPELINE_TAG_JSON,
repo_type="dataset",
)
with open(hub_pipeline_tags_json) as f:
hub_pipeline_tags_json = f.read()
with tempfile.TemporaryDirectory() as tmp_dir:
pipelines_dataset.to_json(os.path.join(tmp_dir, PIPELINE_TAG_JSON))
with open(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) as f:
pipeline_tags_json = f.read()
hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json
if hub_pipeline_tags_equal:
print("No updates, not pushing the metadata files.")
return
if commit_sha is not None:
commit_message = (
f"Update with commit {commit_sha}\n\nSee: https://github.com/huggingface/diffusers/commit/{commit_sha}"
)
else:
commit_message = "Update"
upload_folder(
repo_id="huggingface/diffusers-metadata",
folder_path=tmp_dir,
repo_type="dataset",
commit_message=commit_message,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--commit_sha", default=None, type=str, help="The sha of the commit going with this update.")
args = parser.parse_args()
update_metadata(args.commit_sha)
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility that updates the metadata of the Diffusers library in the repository `huggingface/diffusers-metadata`.
Usage for an update (as used by the GitHub action `update_metadata`):
```bash
python utils/update_metadata.py
```
Script modified from:
https://github.com/huggingface/transformers/blob/main/utils/update_metadata.py
"""
import argparse
import os
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from diffusers.pipelines.auto_pipeline import (
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
AUTO_INPAINT_PIPELINES_MAPPING,
AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
)
PIPELINE_TAG_JSON = "pipeline_tags.json"
def get_supported_pipeline_table() -> dict:
"""
Generates a dictionary containing the supported auto classes for each pipeline type,
using the content of the auto modules.
"""
# All supported pipelines for automatic mapping.
all_supported_pipeline_classes = [
(class_name.__name__, "text-to-image", "AutoPipelineForText2Image")
for _, class_name in AUTO_TEXT2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForImage2Image")
for _, class_name in AUTO_IMAGE2IMAGE_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes += [
(class_name.__name__, "image-to-image", "AutoPipelineForInpainting")
for _, class_name in AUTO_INPAINT_PIPELINES_MAPPING.items()
]
all_supported_pipeline_classes = list(set(all_supported_pipeline_classes))
all_supported_pipeline_classes.sort(key=lambda x: x[0])
data = {}
data["pipeline_class"] = [sample[0] for sample in all_supported_pipeline_classes]
data["pipeline_tag"] = [sample[1] for sample in all_supported_pipeline_classes]
data["auto_class"] = [sample[2] for sample in all_supported_pipeline_classes]
return data
def update_metadata(commit_sha: str):
"""
Update the metadata for the Diffusers repo in `huggingface/diffusers-metadata`.
Args:
commit_sha (`str`): The commit SHA on Diffusers corresponding to this update.
"""
pipelines_table = get_supported_pipeline_table()
pipelines_table = pd.DataFrame(pipelines_table)
pipelines_dataset = Dataset.from_pandas(pipelines_table)
hub_pipeline_tags_json = hf_hub_download(
repo_id="huggingface/diffusers-metadata",
filename=PIPELINE_TAG_JSON,
repo_type="dataset",
)
with open(hub_pipeline_tags_json) as f:
hub_pipeline_tags_json = f.read()
with tempfile.TemporaryDirectory() as tmp_dir:
pipelines_dataset.to_json(os.path.join(tmp_dir, PIPELINE_TAG_JSON))
with open(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) as f:
pipeline_tags_json = f.read()
hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json
if hub_pipeline_tags_equal:
print("No updates, not pushing the metadata files.")
return
if commit_sha is not None:
commit_message = (
f"Update with commit {commit_sha}\n\nSee: "
f"https://github.com/huggingface/diffusers/commit/{commit_sha}"
)
else:
commit_message = "Update"
upload_folder(
repo_id="huggingface/diffusers-metadata",
folder_path=tmp_dir,
repo_type="dataset",
commit_message=commit_message,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--commit_sha", default=None, type=str, help="The sha of the commit going with this update.")
args = parser.parse_args()
update_metadata(args.commit_sha)
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image_tensor,
get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
permute_channels,
permute_channels_image_pil,
permute_channels_image_tensor,
permute_channels_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_boxes,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
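# Illustrative entries produced from DATASETS_ON_HF_GCP above:
#   with_config=True  -> {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"}
#   with_config=False -> {"testcase_name": "wikipedia", "dataset": "wikipedia"}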
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = os.path.join(
HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=False), config.DATASET_INFO_FILENAME
).replace(os.sep, "/")
dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
self.assertTrue(os.path.exists(dataset_info_path))
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(
os.path.join("datasets", dataset), cache_dir=tmp_dir, local_files_only=True
)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = os.path.join(
HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=False), config.DATASET_INFO_FILENAME
).replace(os.sep, "/")
dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
self.assertTrue(os.path.exists(dataset_info_path))
|
from __future__ import annotations
import sys
from .classification import CrossEncoderClassificationEvaluator
from .correlation import CrossEncoderCorrelationEvaluator
from .deprecated import (
CEBinaryAccuracyEvaluator,
CEBinaryClassificationEvaluator,
CECorrelationEvaluator,
CEF1Evaluator,
CERerankingEvaluator,
CESoftmaxAccuracyEvaluator,
)
from .nano_beir import CrossEncoderNanoBEIREvaluator
from .reranking import CrossEncoderRerankingEvaluator
# Ensure that imports using the deprecated paths still work, although importing
# via `from sentence_transformers.cross_encoder.evaluation import ...` is recommended
deprecated_modules = [
"sentence_transformers.cross_encoder.evaluation.CEBinaryAccuracyEvaluator",
"sentence_transformers.cross_encoder.evaluation.CEBinaryClassificationEvaluator",
"sentence_transformers.cross_encoder.evaluation.CEF1Evaluator",
"sentence_transformers.cross_encoder.evaluation.CESoftmaxAccuracyEvaluator",
"sentence_transformers.cross_encoder.evaluation.CECorrelationEvaluator",
"sentence_transformers.cross_encoder.evaluation.CERerankingEvaluator",
]
for module in deprecated_modules:
sys.modules[module] = sys.modules["sentence_transformers.cross_encoder.evaluation.deprecated"]
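# After this aliasing, legacy imports such as
#     from sentence_transformers.cross_encoder.evaluation.CERerankingEvaluator import CERerankingEvaluator
# should still resolve (to the `deprecated` module) and keep working.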
__all__ = [
"CrossEncoderClassificationEvaluator",
"CrossEncoderCorrelationEvaluator",
"CrossEncoderRerankingEvaluator",
"CrossEncoderNanoBEIREvaluator",
# Deprecated:
"CERerankingEvaluator",
"CECorrelationEvaluator",
"CEBinaryAccuracyEvaluator",
"CEBinaryClassificationEvaluator",
"CEF1Evaluator",
"CESoftmaxAccuracyEvaluator",
]
|
from __future__ import annotations
# TODO: Consider renaming all evaluators to CrossEncoder..., e.g. CrossEncoderNanoBEIREvaluator, CrossEncoderClassificationEvaluator, etc.
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CEClassificationEvaluator import CEClassificationEvaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CENanoBEIREvaluator import CENanoBEIREvaluator
from .CERerankingEvaluator import CERerankingEvaluator
from .CESoftmaxAccuracyEvaluator import CESoftmaxAccuracyEvaluator
__all__ = [
"CEClassificationEvaluator",
"CECorrelationEvaluator",
"CERerankingEvaluator",
"CENanoBEIREvaluator",
"CEBinaryAccuracyEvaluator", # Deprecated
"CEBinaryClassificationEvaluator", # Deprecated
"CEF1Evaluator", # Deprecated
"CESoftmaxAccuracyEvaluator", # Deprecated
]
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_canny(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
prompts = "bird"
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
canny_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, jax.device_count())
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
processed_image = shard(processed_image)
images = pipe(
prompt_ids=prompt_ids,
image=processed_image,
params=p_params,
prng_seed=rng,
num_inference_steps=50,
jit=True,
).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
)
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def test_pose(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
prompts = "Chef in the kitchen"
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
pose_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
)
processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, jax.device_count())
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
processed_image = shard(processed_image)
images = pipe(
prompt_ids=prompt_ids,
image=processed_image,
params=p_params,
prng_seed=rng,
num_inference_steps=50,
jit=True,
).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
)
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_canny(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
prompts = "bird"
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
canny_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, jax.device_count())
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
processed_image = shard(processed_image)
images = pipe(
prompt_ids=prompt_ids,
image=processed_image,
params=p_params,
prng_seed=rng,
num_inference_steps=50,
jit=True,
).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
)
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def test_pose(self):
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params
prompts = "Chef in the kitchen"
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
pose_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
)
processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, jax.device_count())
p_params = replicate(params)
prompt_ids = shard(prompt_ids)
processed_image = shard(processed_image)
images = pipe(
prompt_ids=prompt_ids,
image=processed_image,
params=p_params,
prng_seed=rng,
num_inference_steps=50,
jit=True,
).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
)
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
from jina import Flow
import os
os.environ['JINA_LOG_LEVEL'] = 'DEBUG'
if __name__ == '__main__':
with Flow.load_config('flow.yml') as f:
f.block()
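# The script expects a `flow.yml` in the working directory. A minimal illustrative
# Flow config (the executor name and `uses` value are placeholders, not part of this repo):
#
#     jtype: Flow
#     executors:
#       - name: my_executor
#         uses: MyExecutor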
|
from jina import Flow
import os
os.environ['JINA_LOG_LEVEL'] = 'DEBUG'
if __name__ == '__main__':
with Flow.load_config('flow.yml') as f:
f.block()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline steps')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='directory to save the rendered images when no display interface is available')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='interval (in seconds) between displayed samples')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into the config file. If the value to '
'be overwritten is a list, it should be of the form key="[a,b]" or key=a,b. '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
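# An illustrative `custom_imports` section in a config (field names follow the
# mmcv convention; the module path is a placeholder):
#     custom_imports = dict(imports=['mmdet.models.my_backbone'], allow_failed_imports=False)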
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline steps')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='directory to save the rendered images when no display interface is available')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='interval (in seconds) between displayed samples')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into the config file. If the value to '
'be overwritten is a list, it should be of the form key="[a,b]" or key=a,b. '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]". '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
train_data_cfg['pipeline'] = [
x for x in train_data_cfg.pipeline if x['type'] not in skip_type
]
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
|
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.sampler import DefaultBatchSampler, MultiDatasetDefaultBatchSampler
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically set it to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
# Globally silence PyTorch sparse CSR tensor beta warning
warnings.filterwarnings("ignore", message="Sparse CSR tensor support is in beta state")
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"DefaultBatchSampler",
"MultiDatasetDefaultBatchSampler",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically set it to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
# Globally silence PyTorch sparse CSR tensor beta warning
warnings.filterwarnings("ignore", message="Sparse CSR tensor support is in beta state")
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
DTypePolicyMap,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
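# Illustrative round trip (a sketch; the exact layout of the config dict may
# differ across Keras versions):
#     config = serialize(FloatDTypePolicy("float32"))
#     policy = deserialize(config)  # an equivalent FloatDTypePolicy("float32")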
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...FloatDTypePolicy'>
You can also specify the `config` of the dtype policy to this function by
passing a dict containing `class_name` and `config` as an identifier. Also
note that the `class_name` must map to a `DTypePolicy` class.
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...FloatDTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None`, a string name of a
`DTypePolicy`, a `DTypePolicy` configuration dictionary, or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, DTypePolicy):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
except Exception:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...FloatDTypePolicy'>
You can also specify the `config` of the dtype policy to this function by
passing a dict containing `class_name` and `config` as an identifier. Also
note that the `class_name` must map to a `DTypePolicy` class.
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...FloatDTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None`, a string name of a
`DTypePolicy`, a `DTypePolicy` configuration dictionary, or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, DTypePolicy):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
except Exception:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
|
from __future__ import annotations
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
__all__ = [
"DenoisingAutoEncoderDataset",
"NoDuplicatesDataLoader",
"ParallelSentencesDataset",
"SentencesDataset",
"SentenceLabelDataset",
]
|
from .DenoisingAutoEncoderDataset import DenoisingAutoEncoderDataset
from .NoDuplicatesDataLoader import NoDuplicatesDataLoader
from .ParallelSentencesDataset import ParallelSentencesDataset
from .SentenceLabelDataset import SentenceLabelDataset
from .SentencesDataset import SentencesDataset
__all__ = [
"DenoisingAutoEncoderDataset",
"NoDuplicatesDataLoader",
"ParallelSentencesDataset",
"SentencesDataset",
"SentenceLabelDataset",
]
|
"""Firestore Reader."""
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
DEFAULT_FIRESTORE_DATABASE = "(default)"
USER_AGENT = "LlamaHub"
IMPORT_ERROR_MSG = (
"`firestore` package not found, please run `pip3 install google-cloud-firestore`"
)
class FirestoreReader(BaseReader):
"""
Simple Firestore reader.
Args:
project_id (str): The Google Cloud Project ID.
*args (Optional[Any]): Additional arguments.
**kwargs (Optional[Any]): Additional keyword arguments.
Returns:
FirestoreReader: A FirestoreReader object.
"""
def __init__(
self,
project_id: str,
database_id: str = DEFAULT_FIRESTORE_DATABASE,
*args: Optional[Any],
**kwargs: Optional[Any],
) -> None:
"""Initialize with parameters."""
try:
from google.cloud import firestore
from google.cloud.firestore_v1.services.firestore.transports.base import (
DEFAULT_CLIENT_INFO,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MSG)
client_info = DEFAULT_CLIENT_INFO
client_info.user_agent = USER_AGENT
self.db = firestore.Client(
project=project_id, database=database_id, client_info=client_info
)
def load_data(self, collection: str) -> List[Document]:
"""
Load data from a Firestore collection, returning a list of Documents.
Args:
collection (str): The name of the Firestore collection to read from.
Returns:
List[Document]: A list of Document objects.
"""
documents = []
col_ref = self.db.collection(collection)
for doc in col_ref.stream():
doc_str = ", ".join([f"{k}: {v}" for k, v in doc.to_dict().items()])
documents.append(Document(text=doc_str))
return documents
def load_document(self, document_url: str) -> Document:
"""
Load a single document from Firestore.
Args:
document_url (str): The absolute path to the Firestore document to read.
Returns:
Document: A Document object.
"""
parts = document_url.split("/")
if len(parts) % 2 != 0:
raise ValueError(f"Invalid document URL: {document_url}")
ref = self.db.collection(parts[0])
for i in range(1, len(parts)):
if i % 2 == 0:
ref = ref.collection(parts[i])
else:
ref = ref.document(parts[i])
doc = ref.get()
if not doc.exists:
raise ValueError(f"No such document: {document_url}")
doc_str = ", ".join([f"{k}: {v}" for k, v in doc.to_dict().items()])
return Document(text=doc_str)
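# Illustrative usage (project and collection names are placeholders and assume
# valid Google Cloud credentials are configured):
#     reader = FirestoreReader(project_id="my-gcp-project")
#     docs = reader.load_data(collection="my-collection")
#     one_doc = reader.load_document("my-collection/my-document-id")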
|
"""Firestore Reader."""
from typing import Any, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
DEFAULT_FIRESTORE_DATABASE = "(default)"
USER_AGENT = "LlamaHub"
IMPORT_ERROR_MSG = (
"`firestore` package not found, please run `pip3 install google-cloud-firestore`"
)
class FirestoreReader(BaseReader):
"""Simple Firestore reader.
Args:
project_id (str): The Google Cloud Project ID.
*args (Optional[Any]): Additional arguments.
**kwargs (Optional[Any]): Additional keyword arguments.
Returns:
FirestoreReader: A FirestoreReader object.
"""
def __init__(
self,
project_id: str,
database_id: str = DEFAULT_FIRESTORE_DATABASE,
*args: Optional[Any],
**kwargs: Optional[Any],
) -> None:
"""Initialize with parameters."""
try:
from google.cloud import firestore
from google.cloud.firestore_v1.services.firestore.transports.base import (
DEFAULT_CLIENT_INFO,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MSG)
client_info = DEFAULT_CLIENT_INFO
client_info.user_agent = USER_AGENT
self.db = firestore.Client(
project=project_id, database=database_id, client_info=client_info
)
def load_data(self, collection: str) -> List[Document]:
"""Load data from a Firestore collection, returning a list of Documents.
Args:
collection (str): The name of the Firestore collection to read from.
Returns:
List[Document]: A list of Document objects.
"""
documents = []
col_ref = self.db.collection(collection)
for doc in col_ref.stream():
doc_str = ", ".join([f"{k}: {v}" for k, v in doc.to_dict().items()])
documents.append(Document(text=doc_str))
return documents
def load_document(self, document_url: str) -> Document:
"""Load a single document from Firestore.
Args:
document_url (str): The absolute path to the Firestore document to read.
Returns:
Document: A Document object.
"""
parts = document_url.split("/")
if len(parts) % 2 != 0:
raise ValueError(f"Invalid document URL: {document_url}")
ref = self.db.collection(parts[0])
for i in range(1, len(parts)):
if i % 2 == 0:
ref = ref.collection(parts[i])
else:
ref = ref.document(parts[i])
doc = ref.get()
if not doc.exists:
raise ValueError(f"No such document: {document_url}")
doc_str = ", ".join([f"{k}: {v}" for k, v in doc.to_dict().items()])
return Document(text=doc_str)
|
import csv
import os
import random
import string
from pathlib import Path
from torchaudio.datasets import fluentcommands
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
HEADER = ["", "path", "speakerId", "transcription", "action", "object", "location"]
SLOTS = ["action", "object", "location"]
ACTIONS = ["activate", "deactivate"]
OBJECTS = ["lights", "volume"]
LOCATIONS = ["none", "kitchen", "bedroom"]
NUM_SPEAKERS = 5
SAMPLES_PER_SPEAKER = 10
SAMPLE_RATE = 16000
def _gen_rand_str(n: int, seed: int):
random.seed(seed)
return "".join(random.choices(string.ascii_letters + string.digits, k=n))
def _gen_csv(dataset_dir: str, subset: str, init_seed: int):
data = []
data.append(HEADER)
idx = 0
seed = init_seed
for _ in range(NUM_SPEAKERS):
speaker_id = _gen_rand_str(5, seed=seed)
speaker_dir = os.path.join(dataset_dir, "wavs", "speakers", speaker_id)
os.makedirs(speaker_dir, exist_ok=True)
for _ in range(SAMPLES_PER_SPEAKER):
seed += 1
filename = _gen_rand_str(10, seed=seed)
path = f"wavs/speakers/{speaker_id}/{filename}.wav"
random.seed(seed)
transcription = ""
act = random.choice(ACTIONS)
obj = random.choice(OBJECTS)
loc = random.choice(LOCATIONS)
data.append([idx, path, speaker_id, transcription, act, obj, loc])
idx += 1
csv_path = os.path.join(dataset_dir, "data", f"{subset}_data.csv")
with open(csv_path, "w") as csv_file:
file_writer = csv.writer(csv_file)
file_writer.writerows(data)
return data
def _save_samples(dataset_dir: str, subset: str, seed: int):
# generate csv file
data = _gen_csv(dataset_dir, subset, seed)
# iterate through csv file, save wavs to corresponding files
header = data[0]
data = data[1:] # remove header
path_idx = header.index("path")
samples = []
for row in data:
wav = get_whitenoise(
sample_rate=SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
filename = row[path_idx]
wav_file = os.path.join(dataset_dir, filename)
save_wav(wav_file, wav, SAMPLE_RATE)
path = Path(wav_file).stem
speaker_id, transcription, act, obj, loc = row[2:]
sample = wav, SAMPLE_RATE, path, speaker_id, transcription, act, obj, loc
samples.append(sample)
seed += 1
return samples
def get_mock_dataset(dataset_dir: str):
data_folder = os.path.join(dataset_dir, "data")
wav_folder = os.path.join(dataset_dir, "wavs", "speakers")
os.makedirs(data_folder, exist_ok=True)
os.makedirs(wav_folder, exist_ok=True)
mocked_train_samples = _save_samples(dataset_dir, "train", 1)
mocked_valid_samples = _save_samples(dataset_dir, "valid", 111)
mocked_test_samples = _save_samples(dataset_dir, "test", 1111)
return mocked_train_samples, mocked_valid_samples, mocked_test_samples
class TestFluentSpeechCommands(TempDirMixin, TorchaudioTestCase):
root_dir = None
backend = "default"
mocked_train_samples = []
mocked_valid_samples = []
mocked_test_samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(cls.root_dir, "fluent_speech_commands_dataset")
(
cls.mocked_train_samples,
cls.mocked_valid_samples,
cls.mocked_test_samples,
) = get_mock_dataset(dataset_dir)
def _testFluentCommands(self, dataset, samples):
num_samples = 0
for i, data in enumerate(dataset):
self.assertEqual(data, samples[i])
num_samples += 1
assert num_samples == len(samples)
def testFluentCommandsTrain(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="train")
self._testFluentCommands(dataset, self.mocked_train_samples)
def testFluentCommandsValid(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="valid")
self._testFluentCommands(dataset, self.mocked_valid_samples)
def testFluentCommandsTest(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="test")
self._testFluentCommands(dataset, self.mocked_test_samples)
|
import csv
import os
import random
import string
from pathlib import Path
from torchaudio.datasets import fluentcommands
from torchaudio_unittest.common_utils import (
get_whitenoise,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
HEADER = ["", "path", "speakerId", "transcription", "action", "object", "location"]
SLOTS = ["action", "object", "location"]
ACTIONS = ["activate", "deactivate"]
OBJECTS = ["lights", "volume"]
LOCATIONS = ["none", "kitchen", "bedroom"]
NUM_SPEAKERS = 5
SAMPLES_PER_SPEAKER = 10
SAMPLE_RATE = 16000
def _gen_rand_str(n: int, seed: int):
random.seed(seed)
return "".join(random.choices(string.ascii_letters + string.digits, k=n))
def _gen_csv(dataset_dir: str, subset: str, init_seed: int):
data = []
data.append(HEADER)
idx = 0
seed = init_seed
for _ in range(NUM_SPEAKERS):
speaker_id = _gen_rand_str(5, seed=seed)
speaker_dir = os.path.join(dataset_dir, "wavs", "speakers", speaker_id)
os.makedirs(speaker_dir, exist_ok=True)
for _ in range(SAMPLES_PER_SPEAKER):
seed += 1
filename = _gen_rand_str(10, seed=seed)
path = f"wavs/speakers/{speaker_id}/{filename}.wav"
random.seed(seed)
transcription = ""
act = random.choice(ACTIONS)
obj = random.choice(OBJECTS)
loc = random.choice(LOCATIONS)
data.append([idx, path, speaker_id, transcription, act, obj, loc])
idx += 1
csv_path = os.path.join(dataset_dir, "data", f"{subset}_data.csv")
with open(csv_path, "w") as csv_file:
file_writer = csv.writer(csv_file)
file_writer.writerows(data)
return data
def _save_samples(dataset_dir: str, subset: str, seed: int):
# generate csv file
data = _gen_csv(dataset_dir, subset, seed)
# iterate through csv file, save wavs to corresponding files
header = data[0]
data = data[1:] # remove header
path_idx = header.index("path")
samples = []
for row in data:
wav = get_whitenoise(
sample_rate=SAMPLE_RATE,
duration=0.01,
n_channels=1,
seed=seed,
)
filename = row[path_idx]
wav_file = os.path.join(dataset_dir, filename)
save_wav(wav_file, wav, SAMPLE_RATE)
path = Path(wav_file).stem
speaker_id, transcription, act, obj, loc = row[2:]
sample = wav, SAMPLE_RATE, path, speaker_id, transcription, act, obj, loc
samples.append(sample)
seed += 1
return samples
def get_mock_dataset(dataset_dir: str):
data_folder = os.path.join(dataset_dir, "data")
wav_folder = os.path.join(dataset_dir, "wavs", "speakers")
os.makedirs(data_folder, exist_ok=True)
os.makedirs(wav_folder, exist_ok=True)
mocked_train_samples = _save_samples(dataset_dir, "train", 1)
mocked_valid_samples = _save_samples(dataset_dir, "valid", 111)
mocked_test_samples = _save_samples(dataset_dir, "test", 1111)
return mocked_train_samples, mocked_valid_samples, mocked_test_samples
class TestFluentSpeechCommands(TempDirMixin, TorchaudioTestCase):
root_dir = None
backend = "default"
mocked_train_samples = []
mocked_valid_samples = []
mocked_test_samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(cls.root_dir, "fluent_speech_commands_dataset")
(
cls.mocked_train_samples,
cls.mocked_valid_samples,
cls.mocked_test_samples,
) = get_mock_dataset(dataset_dir)
def _testFluentCommands(self, dataset, samples):
num_samples = 0
for i, data in enumerate(dataset):
self.assertEqual(data, samples[i])
num_samples += 1
assert num_samples == len(samples)
def testFluentCommandsTrain(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="train")
self._testFluentCommands(dataset, self.mocked_train_samples)
def testFluentCommandsValid(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="valid")
self._testFluentCommands(dataset, self.mocked_valid_samples)
def testFluentCommandsTest(self):
dataset = fluentcommands.FluentSpeechCommands(self.root_dir, subset="test")
self._testFluentCommands(dataset, self.mocked_test_samples)
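# A minimal sketch of building the mock layout outside the unittest harness,
# assuming only a writable temporary directory; it prints the number of
# generated samples per subset using the constants defined above.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        root = os.path.join(tmp, "fluent_speech_commands_dataset")
        train, valid, test = get_mock_dataset(root)
        print(len(train), len(valid), len(test))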
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import pickle
import warnings
from collections import OrderedDict
import torch
import torch.distributed as dist
from mmcv.runner import OptimizerHook, get_dist_info
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
"""Allreduce gradients.
Args:
        params (list[torch.nn.Parameter]): List of parameters of a model.
        coalesce (bool, optional): Whether to allreduce parameters as a whole.
            Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook):
"""Deprecated optimizer hook for distributed training."""
def __init__(self, *args, **kwargs):
warnings.warn('"DistOptimizerHook" is deprecated, please switch to'
'"mmcv.runner.OptimizerHook".')
super().__init__(*args, **kwargs)
def reduce_mean(tensor):
""""Obtain the mean of tensor on different GPUs."""
if not (dist.is_available() and dist.is_initialized()):
return tensor
tensor = tensor.clone()
dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
return tensor
def obj2tensor(pyobj, device='cuda'):
"""Serialize picklable python object to tensor."""
storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))
return torch.ByteTensor(storage).to(device=device)
def tensor2obj(tensor):
"""Deserialize tensor to picklable python object."""
return pickle.loads(tensor.cpu().numpy().tobytes())
@functools.lru_cache()
def _get_global_gloo_group():
"""Return a process group based on gloo backend, containing all the ranks
The result is cached."""
if dist.get_backend() == 'nccl':
return dist.new_group(backend='gloo')
else:
return dist.group.WORLD
def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
"""Apply all reduce function for python dict object.
The code is modified from https://github.com/Megvii-
BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.
NOTE: make sure that py_dict in different ranks has the same keys and
the values should be in the same shape.
Args:
py_dict (dict): Dict to be applied all reduce op.
op (str): Operator, could be 'sum' or 'mean'. Default: 'sum'
group (:obj:`torch.distributed.group`, optional): Distributed group,
Default: None.
to_float (bool): Whether to convert all values of dict to float.
Default: True.
Returns:
OrderedDict: reduced python dict object.
"""
_, world_size = get_dist_info()
if world_size == 1:
return py_dict
if group is None:
# TODO: May try not to use gloo in the future
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return py_dict
# all reduce logic across different devices.
py_key = list(py_dict.keys())
py_key_tensor = obj2tensor(py_key)
dist.broadcast(py_key_tensor, src=0)
py_key = tensor2obj(py_key_tensor)
tensor_shapes = [py_dict[k].shape for k in py_key]
tensor_numels = [py_dict[k].numel() for k in py_key]
if to_float:
flatten_tensor = torch.cat(
[py_dict[k].flatten().float() for k in py_key])
else:
flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])
dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)
if op == 'mean':
flatten_tensor /= world_size
split_tensors = [
x.reshape(shape) for x, shape in zip(
torch.split(flatten_tensor, tensor_numels), tensor_shapes)
]
return OrderedDict({k: v for k, v in zip(py_key, split_tensors)})
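# A minimal usage sketch, assuming `torch.distributed` has already been
# initialized elsewhere (e.g. via `dist.init_process_group`); the loss names
# below are hypothetical and purely illustrative.
if __name__ == '__main__':
    if dist.is_available() and dist.is_initialized():
        losses = {
            'loss_cls': torch.tensor(0.5, device='cuda'),
            'loss_bbox': torch.tensor(0.2, device='cuda'),
        }
        # Average every entry of the dict across all ranks.
        averaged = all_reduce_dict(losses, op='mean')
        # Average a single scalar tensor across all ranks.
        mean_total = reduce_mean(sum(averaged.values()))
        print(mean_total)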
|
import functools
import pickle
import warnings
from collections import OrderedDict
import torch
import torch.distributed as dist
from mmcv.runner import OptimizerHook, get_dist_info
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
"""Allreduce gradients.
Args:
        params (list[torch.nn.Parameter]): List of parameters of a model.
        coalesce (bool, optional): Whether to allreduce parameters as a whole.
            Defaults to True.
bucket_size_mb (int, optional): Size of bucket, the unit is MB.
Defaults to -1.
"""
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook):
"""Deprecated optimizer hook for distributed training."""
def __init__(self, *args, **kwargs):
warnings.warn('"DistOptimizerHook" is deprecated, please switch to'
'"mmcv.runner.OptimizerHook".')
super().__init__(*args, **kwargs)
def reduce_mean(tensor):
""""Obtain the mean of tensor on different GPUs."""
if not (dist.is_available() and dist.is_initialized()):
return tensor
tensor = tensor.clone()
dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
return tensor
def obj2tensor(pyobj, device='cuda'):
"""Serialize picklable python object to tensor."""
storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))
return torch.ByteTensor(storage).to(device=device)
def tensor2obj(tensor):
"""Deserialize tensor to picklable python object."""
return pickle.loads(tensor.cpu().numpy().tobytes())
@functools.lru_cache()
def _get_global_gloo_group():
"""Return a process group based on gloo backend, containing all the ranks
The result is cached."""
if dist.get_backend() == 'nccl':
return dist.new_group(backend='gloo')
else:
return dist.group.WORLD
def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
"""Apply all reduce function for python dict object.
The code is modified from https://github.com/Megvii-
BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.
NOTE: make sure that py_dict in different ranks has the same keys and
the values should be in the same shape.
Args:
py_dict (dict): Dict to be applied all reduce op.
op (str): Operator, could be 'sum' or 'mean'. Default: 'sum'
group (:obj:`torch.distributed.group`, optional): Distributed group,
Default: None.
to_float (bool): Whether to convert all values of dict to float.
Default: True.
Returns:
OrderedDict: reduced python dict object.
"""
_, world_size = get_dist_info()
if world_size == 1:
return py_dict
if group is None:
# TODO: May try not to use gloo in the future
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return py_dict
# all reduce logic across different devices.
py_key = list(py_dict.keys())
py_key_tensor = obj2tensor(py_key)
dist.broadcast(py_key_tensor, src=0)
py_key = tensor2obj(py_key_tensor)
tensor_shapes = [py_dict[k].shape for k in py_key]
tensor_numels = [py_dict[k].numel() for k in py_key]
if to_float:
flatten_tensor = torch.cat(
[py_dict[k].flatten().float() for k in py_key])
else:
flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])
dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)
if op == 'mean':
flatten_tensor /= world_size
split_tensors = [
x.reshape(shape) for x, shape in zip(
torch.split(flatten_tensor, tensor_numels), tensor_shapes)
]
return OrderedDict({k: v for k, v in zip(py_key, split_tensors)})
|
import concurrent.futures
import importlib
import subprocess
from pathlib import Path
def test_importable_all() -> None:
for path in Path("../core/langchain_core/").glob("*"):
module_name = path.stem
if not module_name.startswith(".") and path.suffix != ".typed":
module = importlib.import_module("langchain_core." + module_name)
all_ = getattr(module, "__all__", [])
for cls_ in all_:
getattr(module, cls_)
def try_to_import(module_name: str) -> tuple[int, str]:
"""Try to import a module via subprocess."""
module = importlib.import_module("langchain_core." + module_name)
all_ = getattr(module, "__all__", [])
for cls_ in all_:
getattr(module, cls_)
result = subprocess.run(
["python", "-c", f"import langchain_core.{module_name}"], check=True
)
return result.returncode, module_name
def test_importable_all_via_subprocess() -> None:
"""Test import in isolation.
Note: ImportErrors due to circular imports can be raised
for one sequence of imports but not another.
"""
module_names = []
for path in Path("../core/langchain_core/").glob("*"):
module_name = path.stem
if not module_name.startswith(".") and path.suffix != ".typed":
module_names.append(module_name)
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
futures = [
executor.submit(try_to_import, module_name) for module_name in module_names
]
for future in concurrent.futures.as_completed(futures):
result = future.result() # Will raise an exception if the callable raised
code, module_name = result
if code != 0:
msg = f"Failed to import {module_name}."
raise ValueError(msg)
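# A minimal sketch of checking a single module in isolation, assuming the
# `langchain_core` package is importable; the module name "runnables" is just
# an example.
if __name__ == "__main__":
    code, name = try_to_import("runnables")
    print(f"{name}: subprocess exit code {code}")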
|
import concurrent.futures
import importlib
import subprocess
from pathlib import Path
def test_importable_all() -> None:
for path in Path("../core/langchain_core/").glob("*"):
module_name = path.stem
if not module_name.startswith(".") and path.suffix != ".typed":
module = importlib.import_module("langchain_core." + module_name)
all_ = getattr(module, "__all__", [])
for cls_ in all_:
getattr(module, cls_)
def try_to_import(module_name: str) -> tuple[int, str]:
"""Try to import a module via subprocess."""
module = importlib.import_module("langchain_core." + module_name)
all_ = getattr(module, "__all__", [])
for cls_ in all_:
getattr(module, cls_)
result = subprocess.run(
["python", "-c", f"import langchain_core.{module_name}"],
)
return result.returncode, module_name
def test_importable_all_via_subprocess() -> None:
"""Test import in isolation.
Note: ImportErrors due to circular imports can be raised
for one sequence of imports but not another.
"""
module_names = []
for path in Path("../core/langchain_core/").glob("*"):
module_name = path.stem
if not module_name.startswith(".") and path.suffix != ".typed":
module_names.append(module_name)
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
futures = [
executor.submit(try_to_import, module_name) for module_name in module_names
]
for future in concurrent.futures.as_completed(futures):
result = future.result() # Will raise an exception if the callable raised
code, module_name = result
if code != 0:
msg = f"Failed to import {module_name}."
raise ValueError(msg)
|
import os
import numpy as np
import pytest
from docarray import Document
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_single_doc_summary():
# empty doc
Document().summary()
# nested doc
Document(
chunks=[
Document(),
Document(chunks=[Document()]),
Document(),
],
matches=[Document(), Document()],
).summary()
def test_plot_image():
d = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))
d.display()
d.display(from_='uri')
d.load_uri_to_image_tensor()
d.uri = None
d.display()
d.display(from_='tensor')
def test_plot_image_wrong():
d = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))
with pytest.raises(ValueError):
d.display(from_='tensor')
d.load_uri_to_image_tensor()
d.uri = None
with pytest.raises(ValueError):
d.display(from_='uri')
def test_plot_audio():
d = Document(uri=os.path.join(cur_dir, 'toydata/hello.wav'))
d.display()
d.convert_uri_to_datauri()
d.display()
def test_plot_video():
d = Document(uri=os.path.join(cur_dir, 'toydata/mov_bbb.mp4'))
d.display()
d.convert_uri_to_datauri()
d.display()
def test_plot_embedding():
d = Document(embedding=[1, 2, 3], tensor=np.random.random(128))
d.summary()
c = Document(embedding=[1, 2, 3], tensor=np.random.random(128))
d.chunks.append(c)
d.summary()
|
import os
import numpy as np
from docarray import Document
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_single_doc_summary():
# empty doc
Document().summary()
# nested doc
Document(
chunks=[
Document(),
Document(chunks=[Document()]),
Document(),
],
matches=[Document(), Document()],
).summary()
def test_plot_image():
d = Document(uri=os.path.join(cur_dir, 'toydata/test.png'))
d.display()
d.load_uri_to_image_tensor()
d.uri = None
d.display()
def test_plot_audio():
d = Document(uri=os.path.join(cur_dir, 'toydata/hello.wav'))
d.display()
d.convert_uri_to_datauri()
d.display()
def test_plot_video():
d = Document(uri=os.path.join(cur_dir, 'toydata/mov_bbb.mp4'))
d.display()
d.convert_uri_to_datauri()
d.display()
def test_plot_embedding():
d = Document(embedding=[1, 2, 3], tensor=np.random.random(128))
d.summary()
c = Document(embedding=[1, 2, 3], tensor=np.random.random(128))
d.chunks.append(c)
d.summary()
|
"""Flat reader."""
from fsspec import AbstractFileSystem
from fsspec.implementations.local import LocalFileSystem
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"""Flat reader.
    Extract raw text from a file and save the file type in the metadata.
"""
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def _get_fs(
self, file: Path, fs: Optional[AbstractFileSystem] = None
) -> AbstractFileSystem:
if fs is None:
fs = LocalFileSystem()
return fs
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file into string."""
fs = self._get_fs(file, fs)
with fs.open(file, encoding="utf-8") as f:
content = f.read()
metadata = {"filename": file.name, "extension": file.suffix}
if extra_info:
metadata = {**metadata, **extra_info}
return [Document(text=content, metadata=metadata)]
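# A minimal usage sketch, assuming a plain-text file exists at the hypothetical
# path "notes.txt"; the reader returns a single Document whose metadata records
# the filename and extension.
if __name__ == "__main__":
    reader = FlatReader()
    docs = reader.load_data(Path("notes.txt"), extra_info={"source": "local"})
    print(docs[0].metadata)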
|
"""Flat reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class FlatReader(BaseReader):
"""Flat reader.
    Extract raw text from a file and save the file type in the metadata.
"""
def __init__(
self,
*args: Any,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""Parse file into string."""
with open(file, encoding="utf-8") as f:
content = f.read()
metadata = {"filename": file.name, "extension": file.suffix}
if extra_info:
metadata = {**metadata, **extra_info}
return [Document(text=content, metadata=metadata)]
|
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from unittest import TestCase
from unittest.mock import Mock
from mmcv.cnn import VGG
from mmengine.dataset import BaseDataset
from torch import nn
from mmdet.engine.hooks import NumClassCheckHook
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
class TestNumClassCheckHook(TestCase):
def setUp(self):
# Setup NumClassCheckHook
hook = NumClassCheckHook()
self.hook = hook
# Setup runner mock
runner = Mock()
runner.model = Mock()
runner.logger = Mock()
runner.logger.warning = Mock()
runner.train_dataloader = Mock()
runner.val_dataloader = Mock()
self.runner = runner
# Setup dataset
metainfo = dict(classes=None)
self.none_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
metainfo = dict(classes='class_name')
self.str_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
metainfo = dict(classes=('bus', 'car'))
self.normal_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
# Setup valid model
valid_model = nn.Module()
valid_model.add_module('backbone', VGG(depth=11))
fused_semantic_head = FusedSemanticHead(
num_ins=1,
fusion_level=0,
num_convs=1,
in_channels=1,
conv_out_channels=1)
valid_model.add_module('semantic_head', fused_semantic_head)
rpn_head = nn.Module()
rpn_head.num_classes = 1
valid_model.add_module('rpn_head', rpn_head)
bbox_head = nn.Module()
bbox_head.num_classes = 2
valid_model.add_module('bbox_head', bbox_head)
self.valid_model = valid_model
# Setup invalid model
invalid_model = nn.Module()
bbox_head = nn.Module()
bbox_head.num_classes = 4
invalid_model.add_module('bbox_head', bbox_head)
self.invalid_model = invalid_model
    def test_before_train_epoch(self):
runner = deepcopy(self.runner)
# Test when dataset.metainfo['classes'] is None
runner.train_dataloader.dataset = self.none_classmeta_dataset
self.hook.before_train_epoch(runner)
runner.logger.warning.assert_called_once()
# Test when dataset.metainfo['classes'] is a str
runner.train_dataloader.dataset = self.str_classmeta_dataset
with self.assertRaises(AssertionError):
self.hook.before_train_epoch(runner)
runner.train_dataloader.dataset = self.normal_classmeta_dataset
# Test `num_classes` of model is compatible with dataset
runner.model = self.valid_model
self.hook.before_train_epoch(runner)
# Test `num_classes` of model is not compatible with dataset
runner.model = self.invalid_model
with self.assertRaises(AssertionError):
self.hook.before_train_epoch(runner)
def test_before_val_epoch(self):
runner = deepcopy(self.runner)
# Test when dataset.metainfo['classes'] is None
runner.val_dataloader.dataset = self.none_classmeta_dataset
self.hook.before_val_epoch(runner)
runner.logger.warning.assert_called_once()
# Test when dataset.metainfo['classes'] is a str
runner.val_dataloader.dataset = self.str_classmeta_dataset
with self.assertRaises(AssertionError):
self.hook.before_val_epoch(runner)
runner.val_dataloader.dataset = self.normal_classmeta_dataset
# Test `num_classes` of model is compatible with dataset
runner.model = self.valid_model
self.hook.before_val_epoch(runner)
# Test `num_classes` of model is not compatible with dataset
runner.model = self.invalid_model
with self.assertRaises(AssertionError):
self.hook.before_val_epoch(runner)
|
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from unittest import TestCase
from unittest.mock import Mock
from mmcv.cnn import VGG
from mmengine.dataset import BaseDataset
from torch import nn
from mmdet.engine.hooks import NumClassCheckHook
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
class TestNumClassCheckHook(TestCase):
def setUp(self):
# Setup NumClassCheckHook
hook = NumClassCheckHook()
self.hook = hook
# Setup runner mock
runner = Mock()
runner.model = Mock()
runner.logger = Mock()
runner.logger.warning = Mock()
runner.train_dataloader = Mock()
runner.val_dataloader = Mock()
self.runner = runner
# Setup dataset
metainfo = dict(CLASSES=None)
self.none_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
metainfo = dict(CLASSES='class_name')
self.str_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
metainfo = dict(CLASSES=('bus', 'car'))
self.normal_classmeta_dataset = BaseDataset(
metainfo=metainfo, lazy_init=True)
# Setup valid model
valid_model = nn.Module()
valid_model.add_module('backbone', VGG(depth=11))
fused_semantic_head = FusedSemanticHead(
num_ins=1,
fusion_level=0,
num_convs=1,
in_channels=1,
conv_out_channels=1)
valid_model.add_module('semantic_head', fused_semantic_head)
rpn_head = nn.Module()
rpn_head.num_classes = 1
valid_model.add_module('rpn_head', rpn_head)
bbox_head = nn.Module()
bbox_head.num_classes = 2
valid_model.add_module('bbox_head', bbox_head)
self.valid_model = valid_model
# Setup invalid model
invalid_model = nn.Module()
bbox_head = nn.Module()
bbox_head.num_classes = 4
invalid_model.add_module('bbox_head', bbox_head)
self.invalid_model = invalid_model
    def test_before_train_epoch(self):
runner = deepcopy(self.runner)
# Test when dataset.metainfo['CLASSES'] is None
runner.train_dataloader.dataset = self.none_classmeta_dataset
self.hook.before_train_epoch(runner)
runner.logger.warning.assert_called_once()
# Test when dataset.metainfo['CLASSES'] is a str
runner.train_dataloader.dataset = self.str_classmeta_dataset
with self.assertRaises(AssertionError):
self.hook.before_train_epoch(runner)
runner.train_dataloader.dataset = self.normal_classmeta_dataset
# Test `num_classes` of model is compatible with dataset
runner.model = self.valid_model
self.hook.before_train_epoch(runner)
# Test `num_classes` of model is not compatible with dataset
runner.model = self.invalid_model
with self.assertRaises(AssertionError):
self.hook.before_train_epoch(runner)
def test_before_val_epoch(self):
runner = deepcopy(self.runner)
# Test when dataset.metainfo['CLASSES'] is None
runner.val_dataloader.dataset = self.none_classmeta_dataset
self.hook.before_val_epoch(runner)
runner.logger.warning.assert_called_once()
# Test when dataset.metainfo['CLASSES'] is a str
runner.val_dataloader.dataset = self.str_classmeta_dataset
with self.assertRaises(AssertionError):
self.hook.before_val_epoch(runner)
runner.val_dataloader.dataset = self.normal_classmeta_dataset
# Test `num_classes` of model is compatible with dataset
runner.model = self.valid_model
self.hook.before_val_epoch(runner)
# Test `num_classes` of model is not compatible with dataset
runner.model = self.invalid_model
with self.assertRaises(AssertionError):
self.hook.before_val_epoch(runner)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
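# Example invocation (the script location, config path and override below are
# hypothetical and only illustrate the CLI shape):
#   python print_config.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       --cfg-options model.backbone.depth=101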
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into the config file (deprecated), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
|
_base_ = './ga-rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './ga_rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **need** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils import ExceptionWrapper
from . import MP_STATUS_CHECK_INTERVAL
def _pin_memory_loop(in_queue, out_queue, device_id, done_event, device):
# This setting is thread local, and prevents the copy in pin_memory from
# consuming all CPU cores.
torch.set_num_threads(1)
torch.multiprocessing._set_thread_name("pt_data_pin")
torch.accelerator.set_device_index(device_id)
def do_one_step():
try:
r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
return
idx, data = r
if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
try:
data = pin_memory(data, device)
except Exception:
data = ExceptionWrapper(
where=f"in pin memory thread for device {device_id}"
)
r = (idx, data)
while not done_event.is_set():
try:
out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL)
break
except queue.Full:
continue
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
while not done_event.is_set():
# Make sure that we don't preserve any object from one iteration
# to the next
do_one_step()
def pin_memory(data, device=None):
if isinstance(data, torch.Tensor):
return data.pin_memory(device)
elif isinstance(data, (str, bytes)):
return data
elif isinstance(data, collections.abc.Mapping):
try:
if isinstance(data, collections.abc.MutableMapping):
                # The mapping type may have extra properties, so we can't just
                # use `type(data)(...)` to create the new mapping.
                # Create a clone and update it if the mapping type is mutable.
clone = copy.copy(data)
clone.update(
{k: pin_memory(sample, device) for k, sample in data.items()}
)
return clone
else:
return type(data)({k: pin_memory(sample, device) for k, sample in data.items()}) # type: ignore[call-arg]
except TypeError:
# The mapping type may not support `copy()` / `update(mapping)`
# or `__init__(iterable)`.
return {k: pin_memory(sample, device) for k, sample in data.items()}
elif isinstance(data, tuple) and hasattr(data, "_fields"): # namedtuple
return type(data)(*(pin_memory(sample, device) for sample in data))
elif isinstance(data, tuple):
return [
pin_memory(sample, device) for sample in data
] # Backwards compatibility.
elif isinstance(data, collections.abc.Sequence):
try:
if isinstance(data, collections.abc.MutableSequence):
# The sequence type may have extra properties, so we can't just
# use `type(data)(...)` to create the new sequence.
# Create a clone and update it if the sequence type is mutable.
clone = copy.copy(data) # type: ignore[arg-type]
for i, item in enumerate(data):
clone[i] = pin_memory(item, device)
return clone
return type(data)([pin_memory(sample, device) for sample in data]) # type: ignore[call-arg]
except TypeError:
# The sequence type may not support `copy()` / `__setitem__(index, item)`
# or `__init__(iterable)` (e.g., `range`).
return [pin_memory(sample, device) for sample in data]
elif hasattr(data, "pin_memory"):
return data.pin_memory()
else:
return data
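# A minimal sketch of pinning a nested container, guarded so it only runs when
# CUDA is available (page-locked memory requires an accelerator build); the
# batch contents are arbitrary.
if __name__ == "__main__" and torch.cuda.is_available():
    batch = {"input": torch.randn(2, 3), "labels": (torch.tensor(0), torch.tensor(1))}
    pinned = pin_memory(batch)
    # The tensor now lives in page-locked (pinned) host memory.
    print(pinned["input"].is_pinned())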
|
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **need** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils import ExceptionWrapper
from . import MP_STATUS_CHECK_INTERVAL
def _pin_memory_loop(in_queue, out_queue, device_id, done_event, device):
# This setting is thread local, and prevents the copy in pin_memory from
# consuming all CPU cores.
torch.set_num_threads(1)
torch.multiprocessing._set_thread_name("pt_data_pin")
if device == "cuda":
torch.cuda.set_device(device_id)
elif device == "xpu":
torch.xpu.set_device(device_id) # type: ignore[attr-defined]
elif device == torch._C._get_privateuse1_backend_name():
custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
custom_device_mod.set_device(device_id)
elif device is None:
torch.accelerator.set_device_index(device_id)
def do_one_step():
try:
r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
return
idx, data = r
if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
try:
data = pin_memory(data, device)
except Exception:
data = ExceptionWrapper(
where=f"in pin memory thread for device {device_id}"
)
r = (idx, data)
while not done_event.is_set():
try:
out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL)
break
except queue.Full:
continue
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
while not done_event.is_set():
# Make sure that we don't preserve any object from one iteration
# to the next
do_one_step()
def pin_memory(data, device=None):
if isinstance(data, torch.Tensor):
return data.pin_memory(device)
elif isinstance(data, (str, bytes)):
return data
elif isinstance(data, collections.abc.Mapping):
try:
if isinstance(data, collections.abc.MutableMapping):
                # The mapping type may have extra properties, so we can't just
                # use `type(data)(...)` to create the new mapping.
                # Create a clone and update it if the mapping type is mutable.
clone = copy.copy(data)
clone.update(
{k: pin_memory(sample, device) for k, sample in data.items()}
)
return clone
else:
return type(data)({k: pin_memory(sample, device) for k, sample in data.items()}) # type: ignore[call-arg]
except TypeError:
# The mapping type may not support `copy()` / `update(mapping)`
# or `__init__(iterable)`.
return {k: pin_memory(sample, device) for k, sample in data.items()}
elif isinstance(data, tuple) and hasattr(data, "_fields"): # namedtuple
return type(data)(*(pin_memory(sample, device) for sample in data))
elif isinstance(data, tuple):
return [
pin_memory(sample, device) for sample in data
] # Backwards compatibility.
elif isinstance(data, collections.abc.Sequence):
try:
if isinstance(data, collections.abc.MutableSequence):
# The sequence type may have extra properties, so we can't just
# use `type(data)(...)` to create the new sequence.
# Create a clone and update it if the sequence type is mutable.
clone = copy.copy(data) # type: ignore[arg-type]
for i, item in enumerate(data):
clone[i] = pin_memory(item, device)
return clone
return type(data)([pin_memory(sample, device) for sample in data]) # type: ignore[call-arg]
except TypeError:
# The sequence type may not support `copy()` / `__setitem__(index, item)`
# or `__init__(iterable)` (e.g., `range`).
return [pin_memory(sample, device) for sample in data]
elif hasattr(data, "pin_memory"):
return data.pin_memory()
else:
return data
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import (DeltaXYWHBBoxCoder,
DeltaXYWHBBoxCoderForGLIP)
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
# test add_ctr_clamp
coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
coder = DeltaXYWHBBoxCoderForGLIP()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 0.0000, 0.0000],
[0.1409, 0.1409, 1.8591, 1.8591],
[0.0000, 0.3161, 3.1945, 0.0000],
[5.0000, 5.0000, 4.0000, 4.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
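# A minimal sketch showing that encoding and then decoding with the same
# DeltaXYWHBBoxCoder recovers the ground-truth boxes up to floating point
# error; the coordinates below are arbitrary. The underscore prefix keeps
# pytest from collecting it as a test.
def _example_encode_decode_roundtrip():
    coder = DeltaXYWHBBoxCoder()
    rois = torch.Tensor([[0., 0., 10., 10.]])
    gts = torch.Tensor([[1., 1., 9., 9.]])
    deltas = coder.encode(rois, gts)
    recovered = coder.decode(rois, deltas)
    assert recovered.allclose(gts, atol=1e-4)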
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.task_modules.coders import DeltaXYWHBBoxCoder
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
# test add_ctr_clamp
coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.api import wrappers
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
num_stuff, img_metas):
"""Preprocess the ground truth for a image.
Args:
gt_labels (Tensor): Ground truth labels of each bbox,
with shape (num_gts, ).
        gt_masks (BitmapMasks): Ground truth masks of each instance
            of an image, shape (num_gts, h, w).
gt_semantic_seg (Tensor | None): Ground truth of semantic
segmentation with the shape (1, h, w).
[0, num_thing_class - 1] means things,
[num_thing_class, num_class-1] means stuff,
255 means VOID. It's None when training instance segmentation.
        img_metas (dict): Image meta information of the image.
Returns:
tuple: a tuple containing the following targets.
            - labels (Tensor): Ground truth class indices for an
                image, with shape (n, ), where n is the sum of the number
                of stuff types and the number of instances in the image.
            - masks (Tensor): Ground truth masks for an image, with
shape (n, h, w). Contains stuff and things when training
panoptic segmentation, and things only when training
instance segmentation.
"""
num_classes = num_things + num_stuff
things_masks = gt_masks.pad(img_metas['pad_shape'][:2], pad_val=0)\
.to_tensor(dtype=torch.bool, device=gt_labels.device)
if gt_semantic_seg is None:
masks = things_masks.long()
return gt_labels, masks
things_labels = gt_labels
gt_semantic_seg = gt_semantic_seg.squeeze(0)
semantic_labels = torch.unique(
gt_semantic_seg,
sorted=False,
return_inverse=False,
return_counts=False)
stuff_masks_list = []
stuff_labels_list = []
for label in semantic_labels:
if label < num_things or label >= num_classes:
continue
stuff_mask = gt_semantic_seg == label
stuff_masks_list.append(stuff_mask)
stuff_labels_list.append(label)
if len(stuff_masks_list) > 0:
stuff_masks = torch.stack(stuff_masks_list, dim=0)
stuff_labels = torch.stack(stuff_labels_list, dim=0)
labels = torch.cat([things_labels, stuff_labels], dim=0)
masks = torch.cat([things_masks, stuff_masks], dim=0)
else:
labels = things_labels
masks = things_masks
masks = masks.long()
return labels, masks
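# A minimal sketch of calling `preprocess_panoptic_gt` for instance-only
# training (gt_semantic_seg=None), assuming `BitmapMasks` is importable from
# `mmdet.core.mask` in this mmdet version; all shapes and values are made up.
def _example_instance_only_call():
    import numpy as np
    from mmdet.core.mask import BitmapMasks
    gt_labels = torch.tensor([0, 1])
    gt_masks = BitmapMasks(np.zeros((2, 8, 8), dtype=np.uint8), 8, 8)
    img_metas = dict(pad_shape=(8, 8, 3))
    labels, masks = preprocess_panoptic_gt(
        gt_labels, gt_masks, None, num_things=2, num_stuff=2,
        img_metas=img_metas)
    return labels, masks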
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
num_stuff):
"""Preprocess the ground truth for a image.
Args:
gt_labels (Tensor): Ground truth labels of each bbox,
with shape (num_gts, ).
        gt_masks (BitmapMasks): Ground truth masks of each instance
            of an image, shape (num_gts, h, w).
gt_semantic_seg (Tensor): Ground truth of semantic
segmentation with the shape (1, h, w).
[0, num_thing_class - 1] means things,
[num_thing_class, num_class-1] means stuff,
255 means VOID.
Returns:
tuple: a tuple containing the following targets.
            - labels (Tensor): Ground truth class indices for an
                image, with shape (n, ), where n is the sum of the number
                of stuff types and the number of instances in the image.
            - masks (Tensor): Ground truth masks for an image, with
shape (n, h, w).
"""
num_classes = num_things + num_stuff
things_labels = gt_labels
gt_semantic_seg = gt_semantic_seg.squeeze(0)
things_masks = gt_masks.pad(gt_semantic_seg.shape[-2:], pad_val=0)\
.to_tensor(dtype=torch.bool, device=gt_labels.device)
semantic_labels = torch.unique(
gt_semantic_seg,
sorted=False,
return_inverse=False,
return_counts=False)
stuff_masks_list = []
stuff_labels_list = []
for label in semantic_labels:
if label < num_things or label >= num_classes:
continue
stuff_mask = gt_semantic_seg == label
stuff_masks_list.append(stuff_mask)
stuff_labels_list.append(label)
if len(stuff_masks_list) > 0:
stuff_masks = torch.stack(stuff_masks_list, dim=0)
stuff_labels = torch.stack(stuff_labels_list, dim=0)
labels = torch.cat([things_labels, stuff_labels], dim=0)
masks = torch.cat([things_masks, stuff_masks], dim=0)
else:
labels = things_labels
masks = things_masks
masks = masks.long()
return labels, masks
|
__version__ = '0.13.18'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.17'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import (DetVisualizationHook,
GroundingVisualizationHook,
TrackVisualizationHook)
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',
'PipelineSwitchHook', 'TrackVisualizationHook',
'GroundingVisualizationHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .mean_teacher_hook import MeanTeacherHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .pipeline_switch_hook import PipelineSwitchHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .utils import trigger_visualization_hook
from .visualization_hook import DetVisualizationHook, TrackVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook', 'MeanTeacherHook', 'trigger_visualization_hook',
'PipelineSwitchHook', 'TrackVisualizationHook'
]
|
import tempfile
import unittest
from transformers import LlavaConfig
class LlavaConfigTest(unittest.TestCase):
def test_llava_reload(self):
"""
Simple test for reloading default llava configs
"""
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig()
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()
def test_pixtral_reload(self):
"""
Simple test for reloading pixtral configs
"""
vision_config = {
"model_type": "pixtral",
"head_dim": 64,
"hidden_act": "silu",
"image_size": 1024,
"is_composition": True,
"patch_size": 16,
"rope_theta": 10000.0,
"tie_word_embeddings": False,
}
text_config = {
"model_type": "mistral",
"hidden_size": 5120,
"head_dim": 128,
"num_attention_heads": 32,
"intermediate_size": 14336,
"is_composition": True,
"max_position_embeddings": 1024000,
"num_hidden_layers": 40,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-05,
"rope_theta": 1000000000.0,
"sliding_window": None,
"vocab_size": 131072,
}
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig(vision_config=vision_config, text_config=text_config)
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()
def test_arbitrary_reload(self):
"""
Simple test for reloading arbitrarily composed subconfigs
"""
default_values = LlavaConfig().to_diff_dict()
default_values["vision_config"]["model_type"] = "pixtral"
default_values["text_config"]["model_type"] = "opt"
self.maxDiff = None
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig(**default_values)
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
self.assertDictEqual(config.to_dict(), reloaded.to_dict())
|
import tempfile
import unittest
from transformers import LlavaConfig
class LlavaConfigTest(unittest.TestCase):
def test_llava_reload(self):
"""
Simple test for reloading default llava configs
"""
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig()
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()
def test_pixtral_reload(self):
"""
Simple test for reloading pixtral configs
"""
vision_config = {
"model_type": "pixtral",
"head_dim": 64,
"hidden_act": "silu",
"image_size": 1024,
"is_composition": True,
"patch_size": 16,
"rope_theta": 10000.0,
"tie_word_embeddings": False,
}
text_config = {
"model_type": "mistral",
"hidden_size": 5120,
"head_dim": 128,
"num_attention_heads": 32,
"intermediate_size": 14336,
"is_composition": True,
"max_position_embeddings": 1024000,
"num_hidden_layers": 40,
"num_key_value_heads": 8,
"rms_norm_eps": 1e-05,
"rope_theta": 1000000000.0,
"sliding_window": None,
"vocab_size": 131072,
}
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig(vision_config=vision_config, text_config=text_config)
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
assert config.to_dict() == reloaded.to_dict()
def test_arbitrary_reload(self):
"""
        Simple test for reloading arbitrarily composed subconfigs
"""
default_values = LlavaConfig().to_diff_dict()
default_values["vision_config"]["model_type"] = "pixtral"
default_values["text_config"]["model_type"] = "opt"
self.maxDiff = None
with tempfile.TemporaryDirectory() as tmp_dir:
config = LlavaConfig(**default_values)
config.save_pretrained(tmp_dir)
reloaded = LlavaConfig.from_pretrained(tmp_dir)
self.assertDictEqual(config.to_dict(), reloaded.to_dict())
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
from mmdet.registry import METRICS
@METRICS.register_module()
class DumpProposals(BaseMetric):
"""Dump proposals pseudo metric.
Args:
output_dir (str): The root directory for ``proposals_file``.
Defaults to ''.
proposals_file (str): Proposals file path. Defaults to 'proposals.pkl'.
num_max_proposals (int, optional): Maximum number of proposals to dump.
If not specified, all proposals will be dumped.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'dump_proposals'
def __init__(self,
output_dir: str = '',
proposals_file: str = 'proposals.pkl',
num_max_proposals: Optional[int] = None,
file_client_args: dict = None,
backend_args: dict = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.num_max_proposals = num_max_proposals
# TODO: update after mmengine finish refactor fileio.
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
                'please use `backend_args` instead, please refer to '
'https://github.com/open-mmlab/mmdetection/blob/main/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
self.output_dir = output_dir
assert proposals_file.endswith(('.pkl', '.pickle')), \
'The output file must be a pkl file.'
self.proposals_file = os.path.join(self.output_dir, proposals_file)
if is_main_process():
os.makedirs(self.output_dir, exist_ok=True)
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
pred = data_sample['pred_instances']
# `bboxes` is sorted by `scores`
ranked_scores, rank_inds = pred['scores'].sort(descending=True)
ranked_bboxes = pred['bboxes'][rank_inds, :]
ranked_bboxes = ranked_bboxes.cpu().numpy()
ranked_scores = ranked_scores.cpu().numpy()
pred_instance = InstanceData()
pred_instance.bboxes = ranked_bboxes
pred_instance.scores = ranked_scores
if self.num_max_proposals is not None:
pred_instance = pred_instance[:self.num_max_proposals]
img_path = data_sample['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
result = {file_name: pred_instance}
self.results.append(result)
def compute_metrics(self, results: list) -> dict:
"""Dump the processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: An empty dict.
"""
logger: MMLogger = MMLogger.get_current_instance()
dump_results = {}
for result in results:
dump_results.update(result)
dump(
dump_results,
file=self.proposals_file,
backend_args=self.backend_args)
logger.info(f'Results are saved at {self.proposals_file}')
return {}
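# Illustrative usage sketch (not part of the original file; paths and values are
# hypothetical): as a registered metric, `DumpProposals` is normally enabled via
# an mmdet config rather than instantiated directly, e.g.
#
#     val_evaluator = dict(
#         type='DumpProposals',
#         output_dir='work_dirs/proposals',
#         proposals_file='proposals.pkl',
#         num_max_proposals=1000)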
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
from mmdet.registry import METRICS
@METRICS.register_module()
class DumpProposals(BaseMetric):
"""Dump proposals pseudo metric.
Args:
output_dir (str): The root directory for ``proposals_file``.
Defaults to ''.
proposals_file (str): Proposals file path. Defaults to 'proposals.pkl'.
num_max_proposals (int, optional): Maximum number of proposals to dump.
If not specified, all proposals will be dumped.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'dump_proposals'
def __init__(self,
output_dir: str = '',
proposals_file: str = 'proposals.pkl',
num_max_proposals: Optional[int] = None,
                 file_client_args: Optional[dict] = None,
                 backend_args: Optional[dict] = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.num_max_proposals = num_max_proposals
# TODO: update after mmengine finish refactor fileio.
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
                'please use `backend_args` instead; please refer to '
'https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
self.output_dir = output_dir
assert proposals_file.endswith(('.pkl', '.pickle')), \
'The output file must be a pkl file.'
self.proposals_file = os.path.join(self.output_dir, proposals_file)
if is_main_process():
os.makedirs(self.output_dir, exist_ok=True)
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
pred = data_sample['pred_instances']
# `bboxes` is sorted by `scores`
ranked_scores, rank_inds = pred['scores'].sort(descending=True)
ranked_bboxes = pred['bboxes'][rank_inds, :]
ranked_bboxes = ranked_bboxes.cpu().numpy()
ranked_scores = ranked_scores.cpu().numpy()
pred_instance = InstanceData()
pred_instance.bboxes = ranked_bboxes
pred_instance.scores = ranked_scores
if self.num_max_proposals is not None:
pred_instance = pred_instance[:self.num_max_proposals]
img_path = data_sample['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
result = {file_name: pred_instance}
self.results.append(result)
def compute_metrics(self, results: list) -> dict:
"""Dump the processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: An empty dict.
"""
logger: MMLogger = MMLogger.get_current_instance()
dump_results = {}
for result in results:
dump_results.update(result)
dump(
dump_results,
file=self.proposals_file,
backend_args=self.backend_args)
logger.info(f'Results are saved at {self.proposals_file}')
return {}
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
        output_parser: Output parser to use. Should be one of `pydantic` or `base`.
            Defaults to `base`.
        prompt: Optional prompt to use for the chain.
        verbose: Whether to print the details of the chain.
    Returns:
        Chain (LLMChain) that can be used to answer questions matching the schema.
    """
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
msg = (
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
raise ValueError(msg)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
msg = (
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
raise ValueError(msg)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
        HumanMessage(content="Answer the question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type]
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
return chain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
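# Usage sketch (assumptions, not from the original module): the chat model and
# the retrieved `docs` below are hypothetical. The resulting LLMChain expects
# `context` and `question` inputs and parses the model's function call into an
# answer-with-sources style response.
#
#     # from langchain_openai import ChatOpenAI
#     # chain = create_qa_with_sources_chain(ChatOpenAI(model="gpt-4o-mini"))
#     # chain.invoke({"context": docs, "question": "What does the report conclude?"})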
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
        output_parser: Output parser to use. Should be one of `pydantic` or `base`.
            Defaults to `base`.
        prompt: Optional prompt to use for the chain.
        verbose: Whether to print the details of the chain.
    Returns:
        Chain (LLMChain) that can be used to answer questions matching the schema.
    """
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
raise ValueError(
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
raise ValueError(
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
        HumanMessage(content="Answer the question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type]
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
return chain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
|
import time
import pytest
from jina import Flow
from tests.integration.instrumentation import (
ExecutorFailureWithTracing,
ExecutorTestWithTracing,
get_services,
get_trace_ids,
get_traces,
partition_spans_by_kind,
spans_with_error,
)
@pytest.mark.parametrize(
'protocol, client_type, num_internal_spans',
[
('grpc', 'GRPCClient', 2),
('http', 'HTTPClient', 5),
('websocket', 'WebSocketClient', 7),
],
)
def test_gateway_instrumentation(
otlp_collector, protocol, client_type, num_internal_spans
):
f = Flow(
protocol=protocol,
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=4317,
).add(
uses=ExecutorTestWithTracing,
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=4317,
)
with f:
from jina import DocumentArray
f.post(f'/index', DocumentArray.empty(2), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
services = get_services()
expected_services = ['executor0/rep-0', 'gateway/rep-0', client_type]
assert len(services) == 3
assert set(services).issubset(expected_services)
client_traces = get_traces(client_type)
(server_spans, client_spans, internal_spans) = partition_spans_by_kind(
client_traces
)
assert len(server_spans) == 5
assert len(client_spans) == 5
assert len(internal_spans) == num_internal_spans
trace_ids = get_trace_ids(client_traces)
assert len(trace_ids) == 1
def test_executor_instrumentation(otlp_collector):
f = Flow(
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=4317,
).add(uses=ExecutorFailureWithTracing)
with f:
from jina import DocumentArray
f.post(f'/index', DocumentArray.empty(2), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
client_type = 'GRPCClient'
client_traces = get_traces(client_type)
(server_spans, client_spans, internal_spans) = partition_spans_by_kind(
client_traces
)
assert len(spans_with_error(server_spans)) == 0
assert len(spans_with_error(client_spans)) == 0
assert len(internal_spans) == 2
# Errors reported by DataRequestHandler and request method level spans
assert len(spans_with_error(internal_spans)) == 2
trace_ids = get_trace_ids(client_traces)
assert len(trace_ids) == 1
def test_head_instrumentation(otlp_collector):
f = Flow(
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=4317,
).add(uses=ExecutorTestWithTracing, shards=2)
with f:
from jina import DocumentArray
f.post(f'/index', DocumentArray.empty(2), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
client_type = 'GRPCClient'
client_traces = get_traces(client_type)
(server_spans, client_spans, internal_spans) = partition_spans_by_kind(
client_traces
)
assert len(server_spans) == 9
assert len(client_spans) == 9
assert len(internal_spans) == 2
services = get_services()
expected_services = [
'executor0/shard-0/rep-0',
'executor0/shard-1/rep-0',
'gateway/rep-0',
'executor0/head',
client_type,
]
assert len(services) == 5
assert set(services).issubset(expected_services)
trace_ids = get_trace_ids(client_traces)
assert len(trace_ids) == 1
|
import time
import pytest
from jina import Flow
from tests.integration.instrumentation import (
ExecutorFailureWithTracing,
ExecutorTestWithTracing,
get_services,
get_trace_ids,
get_traces,
partition_spans_by_kind,
spans_with_error,
)
@pytest.mark.parametrize(
'protocol, client_type, num_internal_spans',
[
('grpc', 'GRPCClient', 2),
('http', 'HTTPClient', 5),
('websocket', 'WebSocketClient', 7),
],
)
def test_gateway_instrumentation(
otlp_collector, protocol, client_type, num_internal_spans
):
f = Flow(
protocol=protocol,
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=4317,
).add(
uses=ExecutorTestWithTracing,
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=4317,
)
with f:
from jina import DocumentArray
f.post(f'/index', DocumentArray.empty(2), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
services = get_services()
expected_services = ['executor0/rep-0', 'gateway/rep-0', client_type]
assert len(services) == 3
assert set(services).issubset(expected_services)
client_traces = get_traces(client_type)
(server_spans, client_spans, internal_spans) = partition_spans_by_kind(
client_traces
)
assert len(server_spans) == 5
assert len(client_spans) == 5
assert len(internal_spans) == num_internal_spans
trace_ids = get_trace_ids(client_traces)
assert len(trace_ids) == 1
def test_executor_instrumentation(otlp_collector):
f = Flow(
tracing=True,
traces_exporter_host='localhost',
traces_exporter_port=4317,
).add(uses=ExecutorFailureWithTracing)
with f:
from jina import DocumentArray
f.post(f'/index', DocumentArray.empty(2), continue_on_error=True)
# give some time for the tracing and metrics exporters to finish exporting.
# the client is slow to export the data
time.sleep(8)
client_type = 'GRPCClient'
client_traces = get_traces(client_type)
(server_spans, client_spans, internal_spans) = partition_spans_by_kind(
client_traces
)
assert len(spans_with_error(server_spans)) == 0
assert len(spans_with_error(client_spans)) == 0
assert len(internal_spans) == 2
# Errors reported by DataRequestHandler and request method level spans
assert len(spans_with_error(internal_spans)) == 2
trace_ids = get_trace_ids(client_traces)
assert len(trace_ids) == 1
|
import os
import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.annlite import DocumentArrayAnnlite
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.storage.annlite import AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
@pytest.fixture()
def embed_docs(pytestconfig):
index_files = [
f'{pytestconfig.rootdir}/tests/image-data/*.jpg',
]
query_file = [
f'{pytestconfig.rootdir}/tests/image-data/*.png',
]
dai = DocumentArray.from_files(index_files)
daq = DocumentArray.from_files(query_file)
for doc in daq:
doc.embedding = np.random.random(128)
for doc in dai:
doc.embedding = np.random.random(128)
return daq, dai
def test_empty_doc(embed_docs):
da = DocumentArray([Document(embedding=np.random.random(128))])
with pytest.raises(ValueError):
da[0].plot_matches_sprites()
daq, dai = embed_docs
with pytest.raises(ValueError):
daq[0].plot_matches_sprites()
with pytest.raises(ValueError):
daq[0].plot_matches_sprites(top_k=0)
@pytest.mark.parametrize('top_k', [1, 10, 20])
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128, scroll_batch_size=8)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
def test_matches_sprites(
pytestconfig, tmpdir, da_cls, config, embed_docs, start_storage, top_k
):
da, das = embed_docs
if config:
das = da_cls(das, config=config)
else:
das = da_cls(das)
da.match(das)
da[0].plot_matches_sprites(top_k, output=tmpdir / 'sprint_da.png')
assert os.path.exists(tmpdir / 'sprint_da.png')
@pytest.mark.parametrize('image_source', ['tensor', 'uri'])
@pytest.mark.parametrize(
'da_cls,config_gen',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, lambda: AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, lambda: WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, lambda: QdrantConfig(n_dim=128, scroll_batch_size=8)),
(DocumentArrayElastic, lambda: ElasticConfig(n_dim=128)),
],
)
def test_matches_sprite_image_generator(
pytestconfig,
tmpdir,
image_source,
da_cls,
config_gen,
embed_docs,
start_storage,
):
da, das = embed_docs
if image_source == 'tensor':
da.apply(lambda d: d.load_uri_to_image_tensor())
das.apply(lambda d: d.load_uri_to_image_tensor())
if config_gen:
das = da_cls(das, config=config_gen())
else:
das = da_cls(das)
da.match(das)
da[0].plot_matches_sprites(output=tmpdir / 'sprint_da.png')
assert os.path.exists(tmpdir / 'sprint_da.png')
|
import os
import numpy as np
import pytest
from docarray import DocumentArray, Document
from docarray.array.annlite import DocumentArrayAnnlite
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.storage.annlite import AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
@pytest.fixture()
def embed_docs(pytestconfig):
index_files = [
f'{pytestconfig.rootdir}/tests/image-data/*.jpg',
]
query_file = [
f'{pytestconfig.rootdir}/tests/image-data/*.png',
]
dai = DocumentArray.from_files(index_files)
daq = DocumentArray.from_files(query_file)
for doc in dai + daq:
doc.embedding = np.random.random(128)
return daq, dai
def test_empty_doc(embed_docs):
da = DocumentArray([Document(embedding=np.random.random(128))])
with pytest.raises(ValueError):
da[0].plot_matches_sprites()
daq, dai = embed_docs
with pytest.raises(ValueError):
daq[0].plot_matches_sprites()
with pytest.raises(ValueError):
daq[0].plot_matches_sprites(top_k=0)
@pytest.mark.parametrize('top_k', [1, 10, 20])
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128, scroll_batch_size=8)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
def test_matches_sprites(
pytestconfig, tmpdir, da_cls, config, embed_docs, start_storage, top_k
):
da, das = embed_docs
if config:
das = da_cls(das, config=config)
else:
das = da_cls(das)
da.match(das)
da[0].plot_matches_sprites(top_k, output=tmpdir / 'sprint_da.png')
assert os.path.exists(tmpdir / 'sprint_da.png')
@pytest.mark.parametrize('image_source', ['tensor', 'uri'])
@pytest.mark.parametrize(
'da_cls,config_gen',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, lambda: AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, lambda: WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, lambda: QdrantConfig(n_dim=128, scroll_batch_size=8)),
(DocumentArrayElastic, lambda: ElasticConfig(n_dim=128)),
],
)
def test_matches_sprite_image_generator(
pytestconfig,
tmpdir,
image_source,
da_cls,
config_gen,
embed_docs,
start_storage,
):
da, das = embed_docs
if image_source == 'tensor':
da.apply(lambda d: d.load_uri_to_image_tensor())
das.apply(lambda d: d.load_uri_to_image_tensor())
if config_gen:
das = da_cls(das, config=config_gen())
else:
das = da_cls(das)
da.match(das)
da[0].plot_matches_sprites(output=tmpdir / 'sprint_da.png')
assert os.path.exists(tmpdir / 'sprint_da.png')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class YOLACT(SingleStageInstanceSegmentor):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
mask_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils.typing import ConfigType, OptConfigType, OptMultiConfig
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class YOLACT(SingleStageInstanceSegmentor):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
mask_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
# MMEngine supports the following two ways; users can choose
# whichever is more convenient.
# optim_wrapper = dict(type='AmpOptimWrapper')
_base_.optim_wrapper.type = 'AmpOptimWrapper'
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CenterNet(SingleStageDetector):
    """Implementation of CenterNet (Objects as Points)
<https://arxiv.org/abs/1904.07850>.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class CenterNet(SingleStageDetector):
    """Implementation of CenterNet (Objects as Points)
<https://arxiv.org/abs/1904.07850>.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
from tempfile import NamedTemporaryFile
import huggingface_hub
import pytest
import requests
from packaging import version
from datasets.utils.file_utils import fsspec_get, fsspec_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline, require_not_windows
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.Timeout):
requests.request("GET", "https://huggingface.co", timeout=1.0)
# old versions of `huggingface_hub` don't have timeouts by default and don't allow to set timeouts in HfFileSystem
if version.parse(huggingface_hub.__version__) >= version.parse("0.23.0"):
with pytest.raises(requests.exceptions.Timeout), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS):
with pytest.raises(requests.exceptions.ConnectionError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1):
with pytest.raises(ConnectionError):
fsspec_head("hf://dummy")
with pytest.raises(ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
|
from tempfile import NamedTemporaryFile
import huggingface_hub
import pytest
import requests
from packaging import version
from datasets.utils.file_utils import fsspec_get, fsspec_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline, require_not_windows
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.ConnectTimeout):
requests.request("GET", "https://huggingface.co", timeout=1.0)
# old versions of `huggingface_hub` don't have timeouts by default and don't allow to set timeouts in HfFileSystem
if version.parse(huggingface_hub.__version__) >= version.parse("0.23.0"):
with pytest.raises(requests.exceptions.ConnectTimeout), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
@pytest.mark.integration
@require_not_windows # fsspec get keeps a file handle on windows that raises PermissionError
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS):
with pytest.raises(requests.exceptions.ConnectionError):
requests.request("GET", "https://huggingface.co")
with pytest.raises(requests.exceptions.ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1):
with pytest.raises(ConnectionError):
fsspec_head("hf://dummy")
with pytest.raises(ConnectionError), NamedTemporaryFile() as temp_file:
fsspec_get("hf://dummy", temp_file=temp_file)
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
provided. Streaming is not currently supported.
"""
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
streaming: bool = True,
cache_dir: str = None,
keep_in_memory: bool = False,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
**kwargs,
)
def read(self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split)
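# Usage sketch (hypothetical; assumes an active SparkSession named `spark` and a
# cache directory reachable from the driver):
#
#     # df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     # ds = SparkDatasetReader(df, streaming=False, cache_dir="/nfs/hf_cache").read()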
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
provided. Streaming is not currently supported.
"""
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
**kwargs,
)
def read(self):
if self.streaming:
# TODO: Support as_streaming_dataset.
raise ValueError("SparkDatasetReader is not streamable.")
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split)
|
from typing import Any, ForwardRef, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
    if (
        (get_origin(x) in (list, tuple, dict, set))
        or is_typevar(x)
        or (type(x) == ForwardRef)
        or x == ID
    ):
return False
return issubclass(x, a_tuple)
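# Illustrative sketch (not part of the module): plain `issubclass` raises a
# TypeError on parametrized generics, whereas `safe_issubclass` degrades to
# returning False.
#
#     # from typing import List
#     # issubclass(List[int], object)       # raises TypeError
#     # safe_issubclass(List[int], object)  # returns False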
|
from typing import Any, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
if (get_origin(x) in (list, tuple, dict, set)) or is_typevar(x) or x == ID:
return False
return issubclass(x, a_tuple)
|
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, False, True, True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, False, True, True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
# dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v0.5_train.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v0.5_val.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v0.5_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v0.5_train.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v0.5_val.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v0.5_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric=['bbox', 'segm'],
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_instance/test')
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric=['bbox', 'segm'],
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_instance/test')
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means whether to enable automatic LR scaling
#   (disabled by default).
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
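# Hypothetical example: setting `enable=True` would make MMEngine rescale the
# 0.02 base LR in proportion to (actual total batch size / base_batch_size),
# e.g. `auto_scale_lr = dict(enable=True, base_batch_size=16)`.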
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means whether to enable automatic LR scaling
#   (disabled by default).
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
import os
import urllib.error
import numpy as np
import PIL
import pytest
from pydantic.tools import parse_obj_as
from docarray.typing import ImageUrl
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
PATH_TO_IMAGE_DATA = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'image-data')
IMAGE_PATHS = {
'png': os.path.join(PATH_TO_IMAGE_DATA, 'so_good.png'),
'jpg': os.path.join(PATH_TO_IMAGE_DATA, '05984.jpg'),
'jpeg': os.path.join(PATH_TO_IMAGE_DATA, '05984-2.jpeg'),
}
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
def test_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, REMOTE_JPG)
uri._to_node_protobuf()
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
def test_load(image_format, path_to_img):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize('width,height', [(224, None), (None, 224), (224, 224)])
def test_load_width_height(image_format, path_to_img, width, height):
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(width=width, height=height)
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
if width:
assert shape[1] == width
if height:
assert shape[0] == height
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('remote-jpg', REMOTE_JPG),
],
)
@pytest.mark.parametrize(
'axis_layout',
[
('H', 'W', 'C'),
('H', 'C', 'W'),
('C', 'H', 'W'),
('C', 'W', 'H'),
('W', 'C', 'H'),
('W', 'H', 'C'),
],
)
def test_load_channel_axis(image_format, path_to_img, axis_layout):
sizes = {'H': 100, 'W': 200, 'C': 3}
url = parse_obj_as(ImageUrl, path_to_img)
tensor = url.load(axis_layout=axis_layout, height=sizes['H'], width=sizes['W'])
assert isinstance(tensor, np.ndarray)
shape = tensor.shape
for axis, axis_name in enumerate(axis_layout):
assert shape[axis] == sizes[axis_name]
def test_load_timeout():
url = parse_obj_as(ImageUrl, REMOTE_JPG)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
],
)
def test_load_to_bytes(image_format, path_to_img):
w, h = 224, 224
url = parse_obj_as(ImageUrl, path_to_img)
_bytes = url.load_to_bytes(width=w, height=h)
assert isinstance(_bytes, bytes)
img = PIL.Image.frombytes(mode='1', size=(w, h), data=_bytes)
assert isinstance(img, PIL.Image.Image)
@pytest.mark.parametrize(
'image_format,path_to_img',
[
('png', IMAGE_PATHS['png']),
('jpg', IMAGE_PATHS['jpg']),
('jpeg', IMAGE_PATHS['jpeg']),
('jpg', REMOTE_JPG),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
],
)
def test_validation(image_format, path_to_img):
if image_format == 'illegal':
with pytest.raises(ValueError):
parse_obj_as(ImageUrl, path_to_img)
else:
url = parse_obj_as(ImageUrl, path_to_img)
assert isinstance(url, ImageUrl)
assert isinstance(url, str)
|
import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing import ImageUrl
def test_image_url():
uri = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
def test_proto_image_url():
uri = parse_obj_as(ImageUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing._internal.common_utils import raise_on_run_directly
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
class TestModules(JitTestCase):
def test_script_module_with_constants_list(self):
"""
Test that a module that has __constants__ set to something
that is not a set can be scripted.
"""
# torch.nn.Linear has a __constants__ attribute defined
# and initialized to a list.
class Net(torch.nn.Linear):
x: torch.jit.Final[int]
def __init__(self) -> None:
super().__init__(5, 10)
self.x = 0
self.checkModule(Net(), (torch.randn(5),))
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing._internal.common_utils import raise_on_run_directly
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
class TestModules(JitTestCase):
def test_script_module_with_constants_list(self):
"""
Test that a module that has __constants__ set to something
that is not a set can be scripted.
"""
# torch.nn.Linear has a __constants__ attribute defined
# and initialized to a list.
class Net(torch.nn.Linear):
x: torch.jit.Final[int]
def __init__(self) -> None:
super().__init__(5, 10)
self.x = 0
self.checkModule(Net(), (torch.randn(5),))
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
|
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
train_dataloader = dict(batch_size=1, num_workers=1)
# learning policy
max_epochs = 20
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './htc_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
|
_base_ = './solov2-light_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
_base_ = 'solov2_light_r50_fpn_mstrain_3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')),
neck=dict(in_channels=[64, 128, 256, 512]))
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.content_blocks import (
convert_to_openai_data_block,
convert_to_openai_image_block,
is_data_content_block,
)
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = (
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"RemoveMessage",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"_message_from_dict",
"convert_to_messages",
"convert_to_openai_data_block",
"convert_to_openai_image_block",
"convert_to_openai_messages",
"filter_messages",
"get_buffer_string",
"is_data_content_block",
"merge_content",
"merge_message_runs",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"trim_messages",
)
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_data_block": "content_blocks",
"convert_to_openai_image_block": "content_blocks",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"is_data_content_block": "content_blocks",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""**Messages** are objects used in prompts and chat conversations.
**Class hierarchy:**
.. code-block::
BaseMessage --> SystemMessage, AIMessage, HumanMessage, ChatMessage, FunctionMessage, ToolMessage
--> BaseMessageChunk --> SystemMessageChunk, AIMessageChunk, HumanMessageChunk, ChatMessageChunk, FunctionMessageChunk, ToolMessageChunk
**Main helpers:**
.. code-block::
ChatPromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.messages.ai import (
AIMessage,
AIMessageChunk,
)
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
message_to_dict,
messages_to_dict,
)
from langchain_core.messages.chat import ChatMessage, ChatMessageChunk
from langchain_core.messages.content_blocks import (
convert_to_openai_data_block,
convert_to_openai_image_block,
is_data_content_block,
)
from langchain_core.messages.function import FunctionMessage, FunctionMessageChunk
from langchain_core.messages.human import HumanMessage, HumanMessageChunk
from langchain_core.messages.modifier import RemoveMessage
from langchain_core.messages.system import SystemMessage, SystemMessageChunk
from langchain_core.messages.tool import (
InvalidToolCall,
ToolCall,
ToolCallChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.messages.utils import (
AnyMessage,
MessageLikeRepresentation,
_message_from_dict,
convert_to_messages,
convert_to_openai_messages,
filter_messages,
get_buffer_string,
merge_message_runs,
message_chunk_to_message,
messages_from_dict,
trim_messages,
)
__all__ = (
"AIMessage",
"AIMessageChunk",
"AnyMessage",
"BaseMessage",
"BaseMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"InvalidToolCall",
"MessageLikeRepresentation",
"SystemMessage",
"SystemMessageChunk",
"ToolCall",
"ToolCallChunk",
"ToolMessage",
"ToolMessageChunk",
"RemoveMessage",
"_message_from_dict",
"convert_to_openai_data_block",
"convert_to_openai_image_block",
"convert_to_messages",
"get_buffer_string",
"is_data_content_block",
"merge_content",
"message_chunk_to_message",
"message_to_dict",
"messages_from_dict",
"messages_to_dict",
"filter_messages",
"merge_message_runs",
"trim_messages",
"convert_to_openai_messages",
)
_dynamic_imports = {
"AIMessage": "ai",
"AIMessageChunk": "ai",
"BaseMessage": "base",
"BaseMessageChunk": "base",
"merge_content": "base",
"message_to_dict": "base",
"messages_to_dict": "base",
"ChatMessage": "chat",
"ChatMessageChunk": "chat",
"FunctionMessage": "function",
"FunctionMessageChunk": "function",
"HumanMessage": "human",
"HumanMessageChunk": "human",
"RemoveMessage": "modifier",
"SystemMessage": "system",
"SystemMessageChunk": "system",
"InvalidToolCall": "tool",
"ToolCall": "tool",
"ToolCallChunk": "tool",
"ToolMessage": "tool",
"ToolMessageChunk": "tool",
"AnyMessage": "utils",
"MessageLikeRepresentation": "utils",
"_message_from_dict": "utils",
"convert_to_messages": "utils",
"convert_to_openai_data_block": "content_blocks",
"convert_to_openai_image_block": "content_blocks",
"convert_to_openai_messages": "utils",
"filter_messages": "utils",
"get_buffer_string": "utils",
"is_data_content_block": "content_blocks",
"merge_message_runs": "utils",
"message_chunk_to_message": "utils",
"messages_from_dict": "utils",
"trim_messages": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
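The two module variants above only declare and lazily re-export the message classes; neither shows them in use. A minimal usage sketch follows (assuming `langchain-core` is installed; the helpers are the ones listed in `__all__` above):

```python
# Hedged sketch: construct a short conversation and round-trip it to dicts.
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    SystemMessage,
    messages_to_dict,
)

conversation = [
    SystemMessage(content="You are a terse assistant."),
    HumanMessage(content="What is 2 + 2?"),
    AIMessage(content="4"),
]

# messages_to_dict serializes messages into plain dictionaries for storage.
as_dicts = messages_to_dict(conversation)
print(as_dicts[0]["type"])  # "system"
```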
from jsonschema import Draft7Validator
from jina.schemas import get_full_schema
def test_full_schema():
schema = get_full_schema()
Draft7Validator.check_schema(schema)
# assert jina concepts exist in definitions
for concept in ['gateway', 'flow', 'metas', 'deployment']:
assert f'Jina::{concept.capitalize()}' in schema['definitions']
|
from jsonschema import Draft7Validator
from jina.schemas import get_full_schema
def test_full_schema():
Draft7Validator.check_schema(get_full_schema())
|
from typing import Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.audio.audio_jax_array import AudioJaxArray
from docarray.typing.tensor.jaxarray import JaxArray
T = TypeVar("T", bound="AudioTensor")
class AudioTensor(AnyTensor, AbstractAudioTensor):
"""
Represents an audio tensor object that can be used with TensorFlow, PyTorch, and NumPy.
---
```python
from docarray import BaseDoc
from docarray.typing import AudioTensor
class MyAudioDoc(BaseDoc):
tensor: AudioTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyAudioDoc(tensor=tf.zeros(1000, 2))
type(doc.tensor) # AudioTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyAudioDoc(tensor=torch.zeros(1000, 2))
type(doc.tensor) # AudioTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyAudioDoc(tensor=np.zeros((1000, 2)))
type(doc.tensor) # AudioNdArray
```
---
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(AudioTorchTensor, value)
elif isinstance(value, torch.Tensor):
return AudioTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(AudioTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return AudioTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(AudioJaxArray, value)
elif isinstance(value, jnp.ndarray):
return AudioJaxArray._docarray_from_native(value) # noqa
try:
return AudioNdArray._docarray_validate(value)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
from docarray.typing.tensor.torch_tensor import TorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor,
)
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.audio.audio_jax_array import AudioJaxArray
from docarray.typing.tensor.jaxarray import JaxArray
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="AudioTensor")
class AudioTensor(AnyTensor, AbstractAudioTensor):
"""
Represents an audio tensor object that can be used with TensorFlow, PyTorch, and NumPy.
---
```python
from docarray import BaseDoc
from docarray.typing import AudioTensor
class MyAudioDoc(BaseDoc):
tensor: AudioTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyAudioDoc(tensor=tf.zeros(1000, 2))
type(doc.tensor) # AudioTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyAudioDoc(tensor=torch.zeros(1000, 2))
type(doc.tensor) # AudioTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyAudioDoc(tensor=np.zeros((1000, 2)))
type(doc.tensor) # AudioNdArray
```
---
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(AudioTorchTensor, value)
elif isinstance(value, torch.Tensor):
return AudioTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(AudioTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return AudioTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(AudioJaxArray, value)
elif isinstance(value, jnp.ndarray):
return AudioJaxArray._docarray_from_native(value) # noqa
try:
return AudioNdArray.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]:
if not torch.jit.is_scripting():
_log_api_usage_once(erase)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Image.wrap_like(inpt, output)
elif isinstance(inpt, datapoints.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
|
from typing import Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import is_simple_tensor
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[datapoints.ImageTypeJIT, datapoints.VideoTypeJIT]:
if not torch.jit.is_scripting():
_log_api_usage_once(erase)
if torch.jit.is_scripting() or is_simple_tensor(inpt):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, datapoints.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Image.wrap_like(inpt, output)
elif isinstance(inpt, datapoints.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return datapoints.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` datapoint, or a PIL image, "
f"but got {type(inpt)} instead."
)
|
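The `erase` dispatcher above routes plain tensors, `Image`/`Video` datapoints, and PIL images to the matching kernel. A small illustrative call on a plain tensor is sketched below; the import path is an assumption matching the public transforms-v2 namespace in recent torchvision releases, whereas the file above lives in the internal/prototype tree:

```python
import torch
from torchvision.transforms.v2.functional import erase  # assumed public path

img = torch.rand(3, 64, 64)      # plain CHW tensor image
patch = torch.zeros(3, 16, 16)   # value written into the erased region
out = erase(img, i=8, j=8, h=16, w=16, v=patch)

assert out.shape == img.shape
assert out[..., 8:24, 8:24].abs().sum() == 0  # the 16x16 patch is now zeroed
```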
import numpy as np
from docarray import Document
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(Document):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
from docarray import Document
from docarray.typing import Embedding
def test_set_embedding():
class MyDocument(Document):
embedding: Embedding
d = MyDocument(embedding=np.zeros((3, 224, 224)))
assert isinstance(d.embedding, Embedding)
assert isinstance(d.embedding, np.ndarray)
assert (d.embedding == np.zeros((3, 224, 224))).all()
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {'str': 'TEXT', 'float': 'float', 'int': 'integer'}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
if self._config.columns is None:
self._config.columns = []
for i in range(len(self._config.columns)):
self._config.columns[i] = (
self._config.columns[i][0],
self._map_type(self._config.columns[i][1]),
)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
"""
A quantized model executes some or all of its operations with integers rather than floating-point values. This allows for more compact models and the use of high-performance vectorized operations on many hardware platforms.
As a result, you get about 40% smaller and faster models. The speed-up depends on your CPU and on how PyTorch was built, and can range from roughly 10% to 300%.
Note: Quantized models run on CPUs only; if a GPU is available, use the regular (non-quantized) model for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import csv
import gzip
import logging
import os
import time
import torch
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
### /print debug information to stdout
model_name = "all-distilroberta-v1"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")
q_model = quantize_dynamic(model, {Linear, Embedding})
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
sentences.append(row["sentence1"])
sentences.append(row["sentence2"])
if row["split"] == "test":
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info(f"Done after {diff_normal:.2f} sec. {len(sentences) / diff_normal:.2f} sentences / sec")
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info(f"Done after {diff_quantized:.2f} sec. {len(sentences) / diff_quantized:.2f} sentences / sec")
logging.info(f"Speed-up: {diff_normal / diff_quantized:.2f}")
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
|
"""
A quantized model executes some or all of its operations with integers rather than floating-point values. This allows for more compact models and the use of high-performance vectorized operations on many hardware platforms.
As a result, you get about 40% smaller and faster models. The speed-up depends on your CPU and on how PyTorch was built, and can range from roughly 10% to 300%.
Note: Quantized models run on CPUs only; if a GPU is available, use the regular (non-quantized) model for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import csv
import gzip
import logging
import os
import time
import torch
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
### /print debug information to stdout
model_name = "all-distilroberta-v1"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")
q_model = quantize_dynamic(model, {Linear, Embedding})
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
sentences.append(row["sentence1"])
sentences.append(row["sentence2"])
if row["split"] == "test":
test_samples.append(inp_example)
sentences = sentences[0:10000]
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_normal, len(sentences) / diff_normal))
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info("Done after {:.2f} sec. {:.2f} sentences / sec".format(diff_quantized, len(sentences) / diff_quantized))
logging.info("Speed-up: {:.2f}".format(diff_normal / diff_quantized))
#########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .registry import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .registry import *
from .utils import *
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=5)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=5)),
(DocumentArrayQdrant, QdrantConfig(n_dim=5)),
(DocumentArrayElastic, ElasticConfig(n_dim=5)),
(DocumentArrayRedis, RedisConfig(n_dim=5, flush=True)),
],
)
def test_empty_non_zero(da_cls, config, start_storage):
# Assert .empty provides a da with 0 docs
if config:
da = da_cls.empty(config=config)
else:
da = da_cls.empty()
assert len(da) == 0
# Assert .empty provides a da of the correct length
if config:
da = da_cls.empty(10, config=config)
else:
da = da_cls.empty(10)
assert len(da) == 10
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=5)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=5)),
(DocumentArrayQdrant, QdrantConfig(n_dim=5)),
(DocumentArrayElastic, ElasticConfig(n_dim=5)),
],
)
def test_empty_non_zero(da_cls, config, start_storage):
# Assert .empty provides a da with 0 docs
if config:
da = da_cls.empty(config=config)
else:
da = da_cls.empty()
assert len(da) == 0
# Assert .empty provides a da of the correct length
if config:
da = da_cls.empty(10, config=config)
else:
da = da_cls.empty(10)
assert len(da) == 10
|
"""Fake LLMs for testing purposes."""
import asyncio
import time
from collections.abc import AsyncIterator, Iterator, Mapping
from typing import Any, Optional
from typing_extensions import override
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.llms import LLM
from langchain_core.runnables import RunnableConfig
class FakeListLLM(LLM):
"""Fake LLM for testing purposes."""
responses: list[str]
"""List of responses to return in order."""
# This parameter should be removed from FakeListLLM since
# it's only used by sub-classes.
sleep: Optional[float] = None
"""Sleep time in seconds between responses.
Ignored by FakeListLLM, but used by sub-classes.
"""
i: int = 0
"""Internally incremented after every model invocation.
Useful primarily for testing purposes.
"""
@property
@override
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake-list"
@override
def _call(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return next response."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
@override
async def _acall(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return next response."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
@property
@override
def _identifying_params(self) -> Mapping[str, Any]:
return {"responses": self.responses}
class FakeListLLMError(Exception):
"""Fake error for testing purposes."""
class FakeStreamingListLLM(FakeListLLM):
"""Fake streaming list LLM for testing purposes.
An LLM that will return responses from a list in order.
This model also supports optionally sleeping between successive
chunks in a streaming implementation.
"""
error_on_chunk_number: Optional[int] = None
"""If set, will raise an exception on the specified chunk number."""
@override
def stream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[list[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
result = self.invoke(input, config)
for i_c, c in enumerate(result):
if self.sleep is not None:
time.sleep(self.sleep)
if (
self.error_on_chunk_number is not None
and i_c == self.error_on_chunk_number
):
raise FakeListLLMError
yield c
@override
async def astream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[list[str]] = None,
**kwargs: Any,
) -> AsyncIterator[str]:
result = await self.ainvoke(input, config)
for i_c, c in enumerate(result):
if self.sleep is not None:
await asyncio.sleep(self.sleep)
if (
self.error_on_chunk_number is not None
and i_c == self.error_on_chunk_number
):
raise FakeListLLMError
yield c
|
import asyncio
import time
from collections.abc import AsyncIterator, Iterator, Mapping
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.llms import LLM
from langchain_core.runnables import RunnableConfig
class FakeListLLM(LLM):
"""Fake LLM for testing purposes."""
responses: list[str]
"""List of responses to return in order."""
# This parameter should be removed from FakeListLLM since
# it's only used by sub-classes.
sleep: Optional[float] = None
"""Sleep time in seconds between responses.
Ignored by FakeListLLM, but used by sub-classes.
"""
i: int = 0
"""Internally incremented after every model invocation.
Useful primarily for testing purposes.
"""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fake-list"
def _call(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return next response."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
async def _acall(
self,
prompt: str,
stop: Optional[list[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Return next response."""
response = self.responses[self.i]
if self.i < len(self.responses) - 1:
self.i += 1
else:
self.i = 0
return response
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {"responses": self.responses}
class FakeListLLMError(Exception):
"""Fake error for testing purposes."""
class FakeStreamingListLLM(FakeListLLM):
"""Fake streaming list LLM for testing purposes.
An LLM that will return responses from a list in order.
This model also supports optionally sleeping between successive
chunks in a streaming implementation.
"""
error_on_chunk_number: Optional[int] = None
"""If set, will raise an exception on the specified chunk number."""
def stream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[list[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
result = self.invoke(input, config)
for i_c, c in enumerate(result):
if self.sleep is not None:
time.sleep(self.sleep)
if (
self.error_on_chunk_number is not None
and i_c == self.error_on_chunk_number
):
raise FakeListLLMError
yield c
async def astream(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[list[str]] = None,
**kwargs: Any,
) -> AsyncIterator[str]:
result = await self.ainvoke(input, config)
for i_c, c in enumerate(result):
if self.sleep is not None:
await asyncio.sleep(self.sleep)
if (
self.error_on_chunk_number is not None
and i_c == self.error_on_chunk_number
):
raise FakeListLLMError
yield c
|
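Both variants of the fake LLMs above cycle through a fixed list of responses, which makes deterministic tests easy to write. A small sketch (assuming `langchain-core` is installed; the import path below is where these test helpers live in recent releases):

```python
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM

llm = FakeListLLM(responses=["first", "second"])
print(llm.invoke("anything"))  # -> "first"
print(llm.invoke("anything"))  # -> "second"
print(llm.invoke("anything"))  # -> wraps around to "first"

# FakeStreamingListLLM streams the selected response one character at a time.
streaming = FakeStreamingListLLM(responses=["abc"])
print(list(streaming.stream("anything")))  # -> ["a", "b", "c"]
```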
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = SpladeLoss(
model=model,
main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus,
) # Weight for document loss
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
learning_rate=4e-5,
run_name=run_name,
seed=42,
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(f"sparse-embedding/{run_name}", private=True)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
model.eval()
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = SpladeLoss(
model=model,
main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus,
) # Weight for document loss
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
learning_rate=4e-5,
optim="adamw_torch",
run_name=run_name,
seed=42,
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(f"sparse-embedding/{run_name}", private=True)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(evaluation.SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], max_active_dims=k_dim))
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(evaluation.SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], truncate_dim=k_dim))
dev_evaluator = evaluation.SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
import warnings
from typing import Any
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
"""[DEPREACTED] Use to_image() and to_dtype() instead."""
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: torch.Tensor) -> list[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import warnings
from typing import Any, List
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
"""[DEPREACTED] Use to_image() and to_dtype() instead."""
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: torch.Tensor) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import asyncio
import time
from multiprocessing import Event, Process
import aiohttp
import pytest
from jina import DocumentArray, Executor, Flow, requests
from jina.helper import random_port
from jina.types.request.data import DataRequest
INPUT_DA_LEN = 2
NUM_CLIENTS = 3
@pytest.fixture()
def gateway_port():
port = random_port()
yield port
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
for d in docs:
d.text += f'{d.id} is fooed!'
def ws_flow(start_event, stop_event, gateway_port):
with Flow(protocol='websocket', port_expose=gateway_port).add(
uses=DummyExecutor
) as f:
start_event.set()
f.block(stop_event=stop_event)
def input_da_gen():
for i in range(5):
yield DocumentArray.empty(INPUT_DA_LEN)
time.sleep(1)
def json_requestify(da: DocumentArray, exec_endpoint='/foo'):
return {
'execEndpoint': exec_endpoint,
'data': {'docs': da.to_dict()},
}
def bytes_requestify(da: DocumentArray, exec_endpoint='/foo'):
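    # Build a DataRequest protobuf, attach the docs as serialized bytes, and return the raw payload
    # expected by the websocket 'bytes' protocol.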
r = DataRequest()
r._pb_body.header.exec_endpoint = exec_endpoint
r.data.docs_bytes = da.to_bytes()
return r.to_bytes()
@pytest.fixture
def flow_context(gateway_port):
start_event = Event()
stop_event = Event()
p = Process(
target=ws_flow,
args=(start_event, stop_event, gateway_port),
)
p.start()
start_event.wait()
yield
stop_event.set()
p.join()
async def json_sending_client(gateway_port):
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{gateway_port}/',
) as ws:
for da in input_da_gen():
request = json_requestify(da)
await ws.send_json(request)
response = await ws.receive_json()
assert isinstance(response, dict)
assert response['header']['exec_endpoint'] == '/foo'
assert len(response['data']) == INPUT_DA_LEN
for doc in response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
async def bytes_sending_client(gateway_port):
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{gateway_port}/',
protocols=('bytes',),
) as ws:
for da in input_da_gen():
request = bytes_requestify(da)
await ws.send_bytes(request)
response = await ws.receive_bytes()
assert isinstance(response, bytes)
dict_response = DataRequest(response).to_dict()
assert dict_response['header']['exec_endpoint'] == '/foo'
assert len(dict_response['data']) == INPUT_DA_LEN
for doc in dict_response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
@pytest.mark.asyncio
async def test_json_single_client(flow_context, gateway_port):
await json_sending_client(gateway_port)
@pytest.mark.asyncio
async def test_json_multiple_clients(flow_context, gateway_port):
await asyncio.wait([json_sending_client(gateway_port) for _ in range(NUM_CLIENTS)])
@pytest.mark.asyncio
async def test_bytes_single_client(flow_context, gateway_port):
await bytes_sending_client(gateway_port)
@pytest.mark.asyncio
async def test_bytes_multiple_clients(flow_context, gateway_port):
await asyncio.wait([bytes_sending_client(gateway_port) for _ in range(NUM_CLIENTS)])
|
import asyncio
import time
from multiprocessing import Event, Process
import aiohttp
import pytest
from jina import DocumentArray, Executor, Flow, requests
from jina.types.request.data import DataRequest
from jina.helper import random_port
INPUT_DA_LEN = 2
NUM_CLIENTS = 3
@pytest.fixture()
def gateway_port():
port = random_port()
yield port
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
for d in docs:
d.text += f'{d.id} is fooed!'
def ws_flow(start_event, stop_event, gateway_port):
with Flow(protocol='websocket', port_expose=gateway_port).add(
uses=DummyExecutor
) as f:
start_event.set()
f.block(stop_event=stop_event)
def input_da_gen():
for i in range(5):
yield DocumentArray.empty(INPUT_DA_LEN)
time.sleep(1)
def json_requestify(da: DocumentArray, exec_endpoint='/foo'):
return {
'execEndpoint': exec_endpoint,
'data': {'docs': da.to_dict()},
}
def bytes_requestify(da: DocumentArray, exec_endpoint='/foo'):
r = DataRequest()
r._pb_body.header.exec_endpoint = exec_endpoint
r.data.docs_bytes = da.to_bytes()
return r.to_bytes()
@pytest.fixture
def flow_context(gateway_port):
start_event = Event()
stop_event = Event()
p = Process(
target=ws_flow,
args=(
start_event,
stop_event,
gateway_port
),
)
p.start()
start_event.wait()
yield
stop_event.set()
p.join()
async def json_sending_client(gateway_port):
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{gateway_port}/',
) as ws:
for da in input_da_gen():
request = json_requestify(da)
await ws.send_json(request)
response = await ws.receive_json()
assert isinstance(response, dict)
assert response['header']['exec_endpoint'] == '/foo'
assert len(response['data']) == INPUT_DA_LEN
for doc in response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
async def bytes_sending_client(gateway_port):
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{gateway_port}/',
protocols=('bytes',),
) as ws:
for da in input_da_gen():
request = bytes_requestify(da)
await ws.send_bytes(request)
response = await ws.receive_bytes()
assert isinstance(response, bytes)
dict_response = DataRequest(response).to_dict()
assert dict_response['header']['exec_endpoint'] == '/foo'
assert len(dict_response['data']) == INPUT_DA_LEN
for doc in dict_response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
@pytest.mark.asyncio
async def test_json_single_client(flow_context, gateway_port):
await json_sending_client(gateway_port)
@pytest.mark.asyncio
async def test_json_multiple_clients(flow_context, gateway_port):
await asyncio.wait([json_sending_client(gateway_port) for _ in range(NUM_CLIENTS)])
@pytest.mark.asyncio
async def test_bytes_single_client(flow_context, gateway_port):
await bytes_sending_client(gateway_port)
@pytest.mark.asyncio
async def test_bytes_multiple_clients(flow_context, gateway_port):
await asyncio.wait([bytes_sending_client(gateway_port) for _ in range(NUM_CLIENTS)])
|
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.benchmark, pytest.mark.slow]
class MyMatrix(BaseDoc):
matrix: NdArray
def cpu_intensive(doc: MyMatrix) -> MyMatrix:
# some cpu intensive function
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return doc
def test_map_docs_multiprocessing():
if os.cpu_count() > 1:
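        # Comparing 1 vs. 2 workers is only meaningful on machines with more than one CPU core.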
def time_multiprocessing(num_workers: int) -> float:
n_docs = 5
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocList[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs(
docs=da,
func=cpu_intensive,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def cpu_intensive_batch(da: DocList[MyMatrix]) -> DocList[MyMatrix]:
# some cpu intensive function
for doc in da:
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return da
def test_map_docs_batched_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 16
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocList[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs_batched(
docs=da,
func=cpu_intensive_batch,
batch_size=8,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def io_intensive(img: ImageDoc) -> ImageDoc:
# some io intensive function: load and set image url
img.tensor = img.url.load()
return img
def test_map_docs_multithreading():
def time_multithreading(num_workers: int) -> float:
n_docs = 100
da = DocList[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs(
docs=da, func=io_intensive, backend='thread', num_worker=num_workers
)
)
return time() - start_time
time_1_thread = time_multithreading(num_workers=1)
time_2_thread = time_multithreading(num_workers=2)
assert time_2_thread < time_1_thread
def io_intensive_batch(da: DocList[ImageDoc]) -> DocList[ImageDoc]:
# some io intensive function: load and set image url
for doc in da:
doc.tensor = doc.url.load()
return da
def test_map_docs_batched_multithreading():
def time_multithreading_batch(num_workers: int) -> float:
n_docs = 100
da = DocList[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs_batched(
docs=da,
func=io_intensive_batch,
backend='thread',
num_worker=num_workers,
batch_size=10,
)
)
return time() - start_time
time_1_thread = time_multithreading_batch(num_workers=1)
time_2_thread = time_multithreading_batch(num_workers=2)
assert time_2_thread < time_1_thread
|
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batched
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.benchmark, pytest.mark.slow]
class MyMatrix(BaseDoc):
matrix: NdArray
def cpu_intensive(doc: MyMatrix) -> MyMatrix:
# some cpu intensive function
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return doc
def test_map_docs_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 5
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs(
da=da, func=cpu_intensive, backend='process', num_worker=num_workers
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def cpu_intensive_batch(da: DocArray[MyMatrix]) -> DocArray[MyMatrix]:
# some cpu intensive function
for doc in da:
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return da
def test_map_docs_batched_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 16
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs_batched(
da=da,
func=cpu_intensive_batch,
batch_size=8,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def io_intensive(img: ImageDoc) -> ImageDoc:
# some io intensive function: load and set image url
img.tensor = img.url.load()
return img
def test_map_docs_multithreading():
def time_multithreading(num_workers: int) -> float:
n_docs = 100
da = DocArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs(da=da, func=io_intensive, backend='thread', num_worker=num_workers)
)
return time() - start_time
time_1_thread = time_multithreading(num_workers=1)
time_2_thread = time_multithreading(num_workers=2)
assert time_2_thread < time_1_thread
def io_intensive_batch(da: DocArray[ImageDoc]) -> DocArray[ImageDoc]:
# some io intensive function: load and set image url
for doc in da:
doc.tensor = doc.url.load()
return da
def test_map_docs_batched_multithreading():
def time_multithreading_batch(num_workers: int) -> float:
n_docs = 100
da = DocArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs_batched(
da=da,
func=io_intensive_batch,
backend='thread',
num_worker=num_workers,
batch_size=10,
)
)
return time() - start_time
time_1_thread = time_multithreading_batch(num_workers=1)
time_2_thread = time_multithreading_batch(num_workers=2)
assert time_2_thread < time_1_thread
|
from jina import Document, Flow
from sentencizer import Sentencizer
def test_exec():
f = Flow().add(uses=Sentencizer)
with f:
resp = f.post(
on='/test',
inputs=Document(text='Hello. World! Go? Back'),
)
assert resp[0].chunks[0].text == 'Hello.'
assert resp[0].chunks[1].text == 'World!'
assert resp[0].chunks[2].text == 'Go?'
assert resp[0].chunks[3].text == 'Back'
|
from jina import Document, Flow
from sentencizer import Sentencizer
def test_exec():
f = Flow().add(uses=Sentencizer)
with f:
resp = f.post(
on='/test',
inputs=Document(text='Hello. World! Go? Back'),
return_results=True,
)
assert resp[0].docs[0].chunks[0].text == 'Hello.'
assert resp[0].docs[0].chunks[1].text == 'World!'
assert resp[0].docs[0].chunks[2].text == 'Go?'
assert resp[0].docs[0].chunks[3].text == 'Back'
|
from __future__ import annotations
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: list[str] = None, label: int | float = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
from typing import List, Union
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
from jina.serve.runtimes.gateway.streamer import GatewayStreamer
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGatewayGetStreamer(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
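        # Obtain the runtime-provided GatewayStreamer used to stream documents through the Flow's Executors.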
self.streamer_obj = GatewayStreamer.get_streamer()
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer_obj.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
from jina.serve.streamer import GatewayStreamer
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGatewayGetStreamer(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
self.streamer_obj = GatewayStreamer.get_streamer()
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer_obj.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
_base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py'
# MMEngine support the following two ways, users can choose
# according to convenience
# param_scheduler = [
# dict(
# type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500), # noqa
# dict(
# type='MultiStepLR',
# begin=0,
# end=12,
# by_epoch=True,
# milestones=[28, 34],
# gamma=0.1)
# ]
_base_.param_scheduler[1].milestones = [28, 34]
train_cfg = dict(max_epochs=36)
|
_base_ = './faster-rcnn_r50-caffe-dc5_ms-1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
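        # In the raw SVHN .mat files the digit "0" is stored with label 10, so map it back with modulo 10.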
return dict(
image=Image(image_array.transpose((2, 0, 1))),
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
|
from typing import Optional
from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain.retrievers.ensemble import EnsembleRetriever
class MockRetriever(BaseRetriever):
docs: list[Document]
def _get_relevant_documents(
self,
query: str,
*,
run_manager: Optional[CallbackManagerForRetrieverRun] = None,
) -> list[Document]:
"""Return the documents"""
return self.docs
def test_invoke() -> None:
documents1 = [
Document(page_content="a", metadata={"id": 1}),
Document(page_content="b", metadata={"id": 2}),
Document(page_content="c", metadata={"id": 3}),
]
documents2 = [Document(page_content="b")]
retriever1 = MockRetriever(docs=documents1)
retriever2 = MockRetriever(docs=documents2)
ensemble_retriever = EnsembleRetriever(
retrievers=[retriever1, retriever2], weights=[0.5, 0.5], id_key=None
)
ranked_documents = ensemble_retriever.invoke("_")
# The document with page_content "b" in documents2
# will be merged with the document with page_content "b"
# in documents1, so the length of ranked_documents should be 3.
# Additionally, the document with page_content "b" will be ranked 1st.
assert len(ranked_documents) == 3
assert ranked_documents[0].page_content == "b"
documents1 = [
Document(page_content="a", metadata={"id": 1}),
Document(page_content="b", metadata={"id": 2}),
Document(page_content="c", metadata={"id": 3}),
]
documents2 = [Document(page_content="d")]
retriever1 = MockRetriever(docs=documents1)
retriever2 = MockRetriever(docs=documents2)
ensemble_retriever = EnsembleRetriever(
retrievers=[retriever1, retriever2], weights=[0.5, 0.5], id_key=None
)
ranked_documents = ensemble_retriever.invoke("_")
# The document with page_content "d" in documents2 will not be merged
# with any document in documents1, so the length of ranked_documents
# should be 4. The document with page_content "a" and the document
# with page_content "d" will have the same score, but the document
# with page_content "a" will be ranked 1st because retriever1 has a smaller index.
assert len(ranked_documents) == 4
assert ranked_documents[0].page_content == "a"
documents1 = [
Document(page_content="a", metadata={"id": 1}),
Document(page_content="b", metadata={"id": 2}),
Document(page_content="c", metadata={"id": 3}),
]
documents2 = [Document(page_content="d", metadata={"id": 2})]
retriever1 = MockRetriever(docs=documents1)
retriever2 = MockRetriever(docs=documents2)
ensemble_retriever = EnsembleRetriever(
retrievers=[retriever1, retriever2], weights=[0.5, 0.5], id_key="id"
)
ranked_documents = ensemble_retriever.invoke("_")
# Since id_key is specified, the document with id 2 will be merged.
# Therefore, the length of ranked_documents should be 3.
# Additionally, the document with page_content "b" will be ranked 1st.
assert len(ranked_documents) == 3
assert ranked_documents[0].page_content == "b"
|
from typing import List, Optional
from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain.retrievers.ensemble import EnsembleRetriever
class MockRetriever(BaseRetriever):
docs: List[Document]
def _get_relevant_documents(
self,
query: str,
*,
run_manager: Optional[CallbackManagerForRetrieverRun] = None,
) -> List[Document]:
"""Return the documents"""
return self.docs
def test_invoke() -> None:
documents1 = [
Document(page_content="a", metadata={"id": 1}),
Document(page_content="b", metadata={"id": 2}),
Document(page_content="c", metadata={"id": 3}),
]
documents2 = [Document(page_content="b")]
retriever1 = MockRetriever(docs=documents1)
retriever2 = MockRetriever(docs=documents2)
ensemble_retriever = EnsembleRetriever(
retrievers=[retriever1, retriever2], weights=[0.5, 0.5], id_key=None
)
ranked_documents = ensemble_retriever.invoke("_")
# The document with page_content "b" in documents2
# will be merged with the document with page_content "b"
# in documents1, so the length of ranked_documents should be 3.
# Additionally, the document with page_content "b" will be ranked 1st.
assert len(ranked_documents) == 3
assert ranked_documents[0].page_content == "b"
documents1 = [
Document(page_content="a", metadata={"id": 1}),
Document(page_content="b", metadata={"id": 2}),
Document(page_content="c", metadata={"id": 3}),
]
documents2 = [Document(page_content="d")]
retriever1 = MockRetriever(docs=documents1)
retriever2 = MockRetriever(docs=documents2)
ensemble_retriever = EnsembleRetriever(
retrievers=[retriever1, retriever2], weights=[0.5, 0.5], id_key=None
)
ranked_documents = ensemble_retriever.invoke("_")
# The document with page_content "d" in documents2 will not be merged
# with any document in documents1, so the length of ranked_documents
# should be 4. The document with page_content "a" and the document
# with page_content "d" will have the same score, but the document
# with page_content "a" will be ranked 1st because retriever1 has a smaller index.
assert len(ranked_documents) == 4
assert ranked_documents[0].page_content == "a"
documents1 = [
Document(page_content="a", metadata={"id": 1}),
Document(page_content="b", metadata={"id": 2}),
Document(page_content="c", metadata={"id": 3}),
]
documents2 = [Document(page_content="d", metadata={"id": 2})]
retriever1 = MockRetriever(docs=documents1)
retriever2 = MockRetriever(docs=documents2)
ensemble_retriever = EnsembleRetriever(
retrievers=[retriever1, retriever2], weights=[0.5, 0.5], id_key="id"
)
ranked_documents = ensemble_retriever.invoke("_")
# Since id_key is specified, the document with id 2 will be merged.
# Therefore, the length of ranked_documents should be 3.
# Additionally, the document with page_content "b" will be ranked 1st.
assert len(ranked_documents) == 3
assert ranked_documents[0].page_content == "b"
|
import os
import pytest
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal
from typing import Any
from llama_index.core.schema import ImageDocument
def get_api_key(instance: Any) -> str:
return instance.api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.raises(ValueError) as err_msg:
NVIDIAMultiModal()
assert (
str(err_msg.value)
== "An API key is required for the hosted NIM. This will become an error in 0.2.0."
)
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = NVIDIAMultiModal(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(NVIDIAMultiModal()) == "ENV"
assert get_api_key(NVIDIAMultiModal(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIAMultiModal(api_key="PARAM")) == "PARAM"
assert (
get_api_key(NVIDIAMultiModal(api_key="LOW", nvidia_api_key="HIGH"))
== "HIGH"
)
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration
def test_bogus_api_key_error(vlm_model: str, masked_env_var: str) -> None:
client = NVIDIAMultiModal(model=vlm_model, nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.complete(
prompt="xyz", image_documents=[ImageDocument(image_url="https://xyz.com")]
)
assert "401" in str(exc_info.value)
|
import os
import pytest
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal
from typing import Any
from llama_index.core.schema import ImageDocument
def get_api_key(instance: Any) -> str:
return instance.api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.raises(ValueError) as err_msg:
NVIDIAMultiModal()
assert (
str(err_msg.value)
== "An API key is required for the hosted NIM. This will become an error in 0.2.0."
)
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = NVIDIAMultiModal(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(NVIDIAMultiModal()) == "ENV"
assert get_api_key(NVIDIAMultiModal(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(NVIDIAMultiModal(api_key="PARAM")) == "PARAM"
assert (
get_api_key(NVIDIAMultiModal(api_key="LOW", nvidia_api_key="HIGH"))
== "HIGH"
)
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration()
def test_bogus_api_key_error(vlm_model: str, masked_env_var: str) -> None:
client = NVIDIAMultiModal(model=vlm_model, nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.complete(
prompt="xyz", image_documents=[ImageDocument(image_url="https://xyz.com")]
)
assert "401" in str(exc_info.value)
|
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel
from backend.data.block import BlockInput
class BlockCostType(str, Enum):
RUN = "run" # cost X credits per run
BYTE = "byte" # cost X credits per byte
SECOND = "second" # cost X credits per second
DOLLAR = "dollar" # cost X dollars per run
class BlockCost(BaseModel):
cost_amount: int
cost_filter: BlockInput
cost_type: BlockCostType
def __init__(
self,
cost_amount: int,
cost_type: BlockCostType = BlockCostType.RUN,
cost_filter: Optional[BlockInput] = None,
**data: Any,
) -> None:
super().__init__(
cost_amount=cost_amount,
cost_filter=cost_filter or {},
cost_type=cost_type,
**data,
)
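# A minimal usage sketch (illustrative amount): a flat per-run charge with the default empty cost filter, e.g.
#   example_cost = BlockCost(cost_amount=5, cost_type=BlockCostType.RUN)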
|
from enum import Enum
from typing import Any, Optional
from pydantic import BaseModel
from backend.data.block import BlockInput
class BlockCostType(str, Enum):
RUN = "run" # cost X credits per run
BYTE = "byte" # cost X credits per byte
SECOND = "second" # cost X credits per second
class BlockCost(BaseModel):
cost_amount: int
cost_filter: BlockInput
cost_type: BlockCostType
def __init__(
self,
cost_amount: int,
cost_type: BlockCostType = BlockCostType.RUN,
cost_filter: Optional[BlockInput] = None,
**data: Any,
) -> None:
super().__init__(
cost_amount=cost_amount,
cost_filter=cost_filter or {},
cost_type=cost_type,
**data,
)
|
import os
import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
WAV2VEC2_XLSR_1B,
WAV2VEC2_XLSR_300M,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
],
)
def test_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.skipif("CI" not in os.environ, reason="Run tests only in CI environment.")
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_XLSR_300M,
WAV2VEC2_XLSR_1B,
],
)
def test_xlsr_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(WAV2VEC2_ASR_BASE_10M, "en", "I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_10M, "en", "I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_10M, "en", "I|HAD|THAT|CURIOUSITY|BESID|ME|AT|THISS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_100H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_960H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_LARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_XLARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(
VOXPOPULI_ASR_BASE_10K_EN,
"en2",
"i|hope|that|we|will|see|a|ddrasstic|decrease|of|funding|for|the|failed|eu|project|and|that|more|money|will|come|back|to|the|taxpayers", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_ES,
"es",
"la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global", # noqa: E501
),
(VOXPOPULI_ASR_BASE_10K_DE, "de", "dabei|spielt|auch|eine|sorgfältige|berichterstattung|eine|wichtige|rolle"),
(
VOXPOPULI_ASR_BASE_10K_FR,
"fr",
"la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_IT,
"it",
"credo|che|illatino|non|sia|contemplato|tra|le|traduzioni|e|quindi|mi|attengo|allitaliano",
),
],
)
def test_finetune_asr_model(
bundle,
lang,
expected,
sample_speech,
ctc_decoder,
):
"""Smoke test of downloading weights for fine-tuning models and simple transcription"""
model = bundle.get_model().eval()
waveform, sample_rate = torchaudio.load(sample_speech)
emission, _ = model(waveform)
decoder = ctc_decoder(bundle.get_labels())
result = decoder(emission[0])
assert result == expected
|
import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
],
)
def test_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(WAV2VEC2_ASR_BASE_10M, "en", "I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_10M, "en", "I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_10M, "en", "I|HAD|THAT|CURIOUSITY|BESID|ME|AT|THISS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_100H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_960H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_LARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_XLARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(
VOXPOPULI_ASR_BASE_10K_EN,
"en2",
"i|hope|that|we|will|see|a|ddrasstic|decrease|of|funding|for|the|failed|eu|project|and|that|more|money|will|come|back|to|the|taxpayers", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_ES,
"es",
"la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global", # noqa: E501
),
(VOXPOPULI_ASR_BASE_10K_DE, "de", "dabei|spielt|auch|eine|sorgfältige|berichterstattung|eine|wichtige|rolle"),
(
VOXPOPULI_ASR_BASE_10K_FR,
"fr",
"la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_IT,
"it",
"credo|che|illatino|non|sia|contemplato|tra|le|traduzioni|e|quindi|mi|attengo|allitaliano",
),
],
)
def test_finetune_asr_model(
bundle,
lang,
expected,
sample_speech,
ctc_decoder,
):
"""Smoke test of downloading weights for fine-tuning models and simple transcription"""
model = bundle.get_model().eval()
waveform, sample_rate = torchaudio.load(sample_speech)
emission, _ = model(waveform)
decoder = ctc_decoder(bundle.get_labels())
result = decoder(emission[0])
assert result == expected
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
num_classes=8,
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(num_classes=8)))
# optimizer
# lr is set for a batch size of 8
optim_wrapper = dict(optimizer=dict(lr=0.01))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=8,
by_epoch=True,
# [7] yields higher performance than [6]
milestones=[7],
gamma=0.1)
]
# actual epoch = 8 * 8 = 64
train_cfg = dict(max_epochs=8)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
# [7] yields higher performance than [6]
milestones=[7],
gamma=0.1)
]
train_cfg = dict(by_epoch=True, max_epochs=8) # actual epoch = 8 * 8 = 64
val_cfg = dict(interval=1)
test_cfg = dict()
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
|
import numpy as np
import pytest
from docarray import Document, DocumentArray
from docarray.document import BaseDocument
from docarray.typing import NdArray
@pytest.fixture()
def da():
class Text(Document):
text: str
return DocumentArray([Text(text='hello') for _ in range(10)])
def test_iterate(da):
for doc, doc2 in zip(da, da._data):
assert doc.id == doc2.id
def test_append():
class Text(Document):
text: str
da = DocumentArray[Text]([])
da.append(Text(text='hello', id='1'))
assert len(da) == 1
assert da[0].id == '1'
def test_extend():
class Text(Document):
text: str
da = DocumentArray[Text]([Text(text='hello', id=str(i)) for i in range(10)])
da.extend([Text(text='hello', id=str(10 + i)) for i in range(10)])
assert len(da) == 20
    for doc, i in zip(da, range(20)):
        assert doc.id == str(i)
def test_document_array():
class Text(Document):
text: str
da = DocumentArray([Text(text='hello') for _ in range(10)])
assert len(da) == 10
def test_document_array_fixed_type():
class Text(Document):
text: str
da = DocumentArray[Text]([Text(text='hello') for _ in range(10)])
assert len(da) == 10
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da._get_array_attribute('tensor')
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da._get_array_attribute('text')
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_set_attributes():
class InnerDoc(BaseDocument):
text: str
class Mmdoc(BaseDocument):
inner: InnerDoc
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(inner=InnerDoc(text=f'hello{i}')) for i in range(N))
)
list_docs = [InnerDoc(text=f'hello{i}') for i in range(N)]
da._set_array_attribute('inner', list_docs)
for doc, list_doc in zip(da, list_docs):
assert doc.inner is list_doc
def test_get_bulk_attributes():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da.tensor
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da.text
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes_document():
class InnerDoc(BaseDocument):
text: str
class Mmdoc(BaseDocument):
inner: InnerDoc
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(inner=InnerDoc(text=f'hello{i}')) for i in range(N))
)
assert isinstance(da.inner, DocumentArray)
|
from docarray import DocumentArray, Document
def test_document_array():
class Text(Document):
text: str
da = DocumentArray([Text(text='hello') for _ in range(10)])
def test_document_array_fixed_type():
class Text(Document):
text: str
da = DocumentArray[Text]([Text(text='hello') for _ in range(10)])
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='VFNetHead',
num_classes=80,
in_channels=256,
stacked_convs=3,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
center_sampling=False,
dcn_on_last_conv=False,
use_atss=True,
use_vfl=True,
loss_cls=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# data setting
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=None)
# learning rate
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='VFNet',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='VFNetHead',
num_classes=80,
in_channels=256,
stacked_convs=3,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
center_sampling=False,
dcn_on_last_conv=False,
use_atss=True,
use_vfl=True,
loss_cls=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# data setting
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.01),
paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
clip_grad=None)
# learning rate
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import FacebookChatLoader
from langchain_community.document_loaders.facebook_chat import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"concatenate_rows": "langchain_community.document_loaders.facebook_chat",
"FacebookChatLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FacebookChatLoader",
"concatenate_rows",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import FacebookChatLoader
from langchain_community.document_loaders.facebook_chat import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"concatenate_rows": "langchain_community.document_loaders.facebook_chat",
"FacebookChatLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"concatenate_rows",
"FacebookChatLoader",
]
|
from setuptools import find_packages
import setuptools
setuptools.setup(
name="jina_executors",
packages=find_packages(where=".", exclude=('tests',)),
include_package_data=True,
version="0.0.1",
author='Jina Dev Team',
author_email='dev-team@jina.ai',
description="A selection of Executors for Jina",
url="https://github.com/jina-ai/executors",
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
)
|
from setuptools import find_packages
import setuptools
setuptools.setup(
name="jina-executors",
version="0.0.1",
author='Jina Dev Team',
author_email='dev-team@jina.ai',
description="A selection of Executors for Jina",
url="https://github.com/jina-ai/executors",
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
packages=find_packages(where='.', include=['jinahub.*']),
python_requires=">=3.7",
)
|
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate
class FinishedOutputParser(BaseOutputParser[tuple[str, bool]]):
"""Output parser that checks if the output is finished."""
finished_value: str = "FINISHED"
"""Value that indicates the output is finished."""
def parse(self, text: str) -> tuple[str, bool]:
cleaned = text.strip()
finished = self.finished_value in cleaned
return cleaned.replace(self.finished_value, ""), finished
PROMPT_TEMPLATE = """\
Respond to the user message using any relevant context. \
If context is provided, you should ground your answer in that context. \
Once you're done responding return FINISHED.
>>> CONTEXT: {context}
>>> USER INPUT: {user_input}
>>> RESPONSE: {response}\
"""
PROMPT = PromptTemplate(
template=PROMPT_TEMPLATE,
input_variables=["user_input", "context", "response"],
)
QUESTION_GENERATOR_PROMPT_TEMPLATE = """\
Given a user input and an existing partial response as context, \
ask a question to which the answer is the given term/entity/phrase:
>>> USER INPUT: {user_input}
>>> EXISTING PARTIAL RESPONSE: {current_response}
The question to which the answer is the term/entity/phrase "{uncertain_span}" is:"""
QUESTION_GENERATOR_PROMPT = PromptTemplate(
template=QUESTION_GENERATOR_PROMPT_TEMPLATE,
input_variables=["user_input", "current_response", "uncertain_span"],
)
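# A minimal usage sketch of the parser defined above (the sample text is illustrative):
#   parser = FinishedOutputParser()
#   response, finished = parser.parse("Paris is the capital of France. FINISHED")
#   # finished is True and the FINISHED marker is stripped from the response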
|
from typing import Tuple
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import PromptTemplate
class FinishedOutputParser(BaseOutputParser[Tuple[str, bool]]):
"""Output parser that checks if the output is finished."""
finished_value: str = "FINISHED"
"""Value that indicates the output is finished."""
def parse(self, text: str) -> Tuple[str, bool]:
cleaned = text.strip()
finished = self.finished_value in cleaned
return cleaned.replace(self.finished_value, ""), finished
PROMPT_TEMPLATE = """\
Respond to the user message using any relevant context. \
If context is provided, you should ground your answer in that context. \
Once you're done responding return FINISHED.
>>> CONTEXT: {context}
>>> USER INPUT: {user_input}
>>> RESPONSE: {response}\
"""
PROMPT = PromptTemplate(
template=PROMPT_TEMPLATE,
input_variables=["user_input", "context", "response"],
)
QUESTION_GENERATOR_PROMPT_TEMPLATE = """\
Given a user input and an existing partial response as context, \
ask a question to which the answer is the given term/entity/phrase:
>>> USER INPUT: {user_input}
>>> EXISTING PARTIAL RESPONSE: {current_response}
The question to which the answer is the term/entity/phrase "{uncertain_span}" is:"""
QUESTION_GENERATOR_PROMPT = PromptTemplate(
template=QUESTION_GENERATOR_PROMPT_TEMPLATE,
input_variables=["user_input", "current_response", "uncertain_span"],
)
|
from typing import Optional
import torch
from docarray import Document, DocumentArray
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(Document):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
batch = DocumentArray[Mmdoc](Mmdoc(text=f'hello{i}') for i in range(N))
batch.tensor = torch.zeros(N, 3, 224, 224)
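    # Stack the array so the per-document tensors are stored (and returned) as one contiguous batch tensor.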
batch = batch.stack()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model = Model()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for _ in range(2):
loss = model(batch.tensor).sum()
loss.backward()
opt.step()
|
from typing import Optional
import torch
from docarray import Document, DocumentArray
from docarray.typing import TorchTensor
def test_torch_train():
class Mmdoc(Document):
text: str
tensor: Optional[TorchTensor[3, 224, 224]]
N = 10
batch = DocumentArray[Mmdoc](Mmdoc(text=f'hello{i}') for i in range(N))
batch.tensor = torch.zeros(N, 3, 224, 224)
batch.stack()
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 16, 3)
def forward(self, x):
return self.conv(x)
model = Model()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
for _ in range(2):
loss = model(batch.tensor).sum()
loss.backward()
opt.step()
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from docarray.utils._internal.misc import is_jax_available
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp
from docarray.computation.jax_backend import JaxCompBackend
from docarray.typing import JaxArray
metrics = JaxCompBackend.Metrics
else:
metrics = None
@pytest.mark.jax
def test_top_k_descending_false():
top_k = JaxCompBackend.Retrieval.top_k
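    # Note: the backend's top_k appears to treat a 1-D input as a single row,
    # hence the (1, 3) result shapes asserted below.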
a = JaxArray(jnp.array([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert jnp.allclose(jnp.squeeze(vals.tensor), jnp.array([1, 2, 2]))
    assert jnp.allclose(jnp.squeeze(indices.tensor), jnp.array([0, 2, 6])) or jnp.allclose(
        jnp.squeeze(indices.tensor), jnp.array([0, 6, 2])
    )
a = JaxArray(jnp.array([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert jnp.allclose(vals.tensor[0], jnp.array([1, 2, 2]))
assert jnp.allclose(indices.tensor[0], jnp.array([0, 2, 6])) or jnp.allclose(
indices.tensor[0], jnp.array([0, 6, 2])
)
assert jnp.allclose(vals.tensor[1], jnp.array([2, 3, 4]))
assert jnp.allclose(indices.tensor[1], jnp.array([2, 4, 6]))
@pytest.mark.jax
def test_top_k_descending_true():
top_k = JaxCompBackend.Retrieval.top_k
a = JaxArray(jnp.array([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert jnp.allclose(jnp.squeeze(vals.tensor), jnp.array([9, 7, 4]))
assert jnp.allclose(jnp.squeeze(indices.tensor), jnp.array([5, 3, 1]))
a = JaxArray(jnp.array([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert jnp.allclose(vals.tensor[0], jnp.array([9, 7, 4]))
assert jnp.allclose(indices.tensor[0], jnp.array([5, 3, 1]))
assert jnp.allclose(vals.tensor[1], jnp.array([11, 10, 7]))
assert jnp.allclose(indices.tensor[1], jnp.array([0, 5, 3]))
|
import pytest
from docarray.utils._internal.misc import is_jax_available
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp
from docarray.computation.jax_backend import JaxCompBackend
from docarray.typing import JaxArray
metrics = JaxCompBackend.Metrics
else:
metrics = None
@pytest.mark.jax
def test_top_k_descending_false():
top_k = JaxCompBackend.Retrieval.top_k
a = JaxArray(jnp.array([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert jnp.allclose(jnp.squeeze(vals.tensor), jnp.array([1, 2, 2]))
    assert jnp.allclose(jnp.squeeze(indices.tensor), jnp.array([0, 2, 6])) or jnp.allclose(
        jnp.squeeze(indices.tensor), jnp.array([0, 6, 2])
    )
a = JaxArray(jnp.array([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=False)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert jnp.allclose(vals.tensor[0], jnp.array([1, 2, 2]))
assert jnp.allclose(indices.tensor[0], jnp.array([0, 2, 6])) or jnp.allclose(
indices.tensor[0], jnp.array([0, 6, 2])
)
assert jnp.allclose(vals.tensor[1], jnp.array([2, 3, 4]))
assert jnp.allclose(indices.tensor[1], jnp.array([2, 4, 6]))
@pytest.mark.jax
def test_top_k_descending_true():
top_k = JaxCompBackend.Retrieval.top_k
a = JaxArray(jnp.array([1, 4, 2, 7, 4, 9, 2]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (1, 3)
assert indices.tensor.shape == (1, 3)
assert jnp.allclose(jnp.squeeze(vals.tensor), jnp.array([9, 7, 4]))
assert jnp.allclose(jnp.squeeze(indices.tensor), jnp.array([5, 3, 1]))
a = JaxArray(jnp.array([[1, 4, 2, 7, 4, 9, 2], [11, 6, 2, 7, 3, 10, 4]]))
vals, indices = top_k(a, 3, descending=True)
assert vals.tensor.shape == (2, 3)
assert indices.tensor.shape == (2, 3)
assert jnp.allclose(vals.tensor[0], jnp.array([9, 7, 4]))
assert jnp.allclose(indices.tensor[0], jnp.array([5, 3, 1]))
assert jnp.allclose(vals.tensor[1], jnp.array([11, 10, 7]))
assert jnp.allclose(indices.tensor[1], jnp.array([0, 5, 3]))
|
from llama_index.core.tools.types import BaseTool, ToolOutput, adapt_to_async_tool
from typing import TYPE_CHECKING, Sequence
from llama_index.core.llms.llm import ToolSelection
import json
if TYPE_CHECKING:
from llama_index.core.tools.types import BaseTool
def call_tool(tool: BaseTool, arguments: dict) -> ToolOutput:
"""Call a tool with arguments."""
try:
if (
len(tool.metadata.get_parameters_dict()["properties"]) == 1
and len(arguments) == 1
):
try:
single_arg = arguments[next(iter(arguments))]
return tool(single_arg)
except Exception:
# some tools will REQUIRE kwargs, so try it
return tool(**arguments)
else:
return tool(**arguments)
except Exception as e:
return ToolOutput(
content="Encountered error: " + str(e),
tool_name=tool.metadata.get_name(),
raw_input=arguments,
raw_output=str(e),
is_error=True,
)
async def acall_tool(tool: BaseTool, arguments: dict) -> ToolOutput:
"""Call a tool with arguments asynchronously."""
async_tool = adapt_to_async_tool(tool)
try:
if (
len(tool.metadata.get_parameters_dict()["properties"]) == 1
and len(arguments) == 1
):
try:
single_arg = arguments[next(iter(arguments))]
return await async_tool.acall(single_arg)
except Exception:
# some tools will REQUIRE kwargs, so try it
return await async_tool.acall(**arguments)
else:
return await async_tool.acall(**arguments)
except Exception as e:
return ToolOutput(
content="Encountered error: " + str(e),
tool_name=tool.metadata.get_name(),
raw_input=arguments,
raw_output=str(e),
is_error=True,
)
def call_tool_with_selection(
tool_call: ToolSelection,
tools: Sequence["BaseTool"],
verbose: bool = False,
) -> ToolOutput:
from llama_index.core.tools.calling import call_tool
tools_by_name = {tool.metadata.name: tool for tool in tools}
name = tool_call.tool_name
if verbose:
arguments_str = json.dumps(tool_call.tool_kwargs)
print("=== Calling Function ===")
print(f"Calling function: {name} with args: {arguments_str}")
tool = tools_by_name[name]
output = call_tool(tool, tool_call.tool_kwargs)
if verbose:
print("=== Function Output ===")
print(output.content)
return output
async def acall_tool_with_selection(
tool_call: ToolSelection,
tools: Sequence["BaseTool"],
verbose: bool = False,
) -> ToolOutput:
from llama_index.core.tools.calling import acall_tool
tools_by_name = {tool.metadata.name: tool for tool in tools}
name = tool_call.tool_name
if verbose:
arguments_str = json.dumps(tool_call.tool_kwargs)
print("=== Calling Function ===")
print(f"Calling function: {name} with args: {arguments_str}")
tool = tools_by_name[name]
output = await acall_tool(tool, tool_call.tool_kwargs)
if verbose:
print("=== Function Output ===")
print(output.content)
return output
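# Illustrative usage (hypothetical tool and selection objects, not from the original source):
#   output = call_tool_with_selection(tool_call, [my_search_tool], verbose=True)
# where tool_call.tool_name must match my_search_tool.metadata.name and
# tool_call.tool_kwargs holds the JSON-decoded arguments passed to the tool.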
|
from llama_index.core.tools.types import BaseTool, ToolOutput, adapt_to_async_tool
from typing import TYPE_CHECKING, Sequence
from llama_index.core.llms.llm import ToolSelection
import json
if TYPE_CHECKING:
from llama_index.core.tools.types import BaseTool
def call_tool(tool: BaseTool, arguments: dict) -> ToolOutput:
"""Call a tool with arguments."""
try:
if (
len(tool.metadata.get_parameters_dict()["properties"]) == 1
and len(arguments) == 1
):
try:
single_arg = arguments[next(iter(arguments))]
return tool(single_arg)
except Exception:
# some tools will REQUIRE kwargs, so try it
return tool(**arguments)
else:
return tool(**arguments)
except Exception as e:
return ToolOutput(
content="Encountered error: " + str(e),
tool_name=tool.metadata.name,
raw_input=arguments,
raw_output=str(e),
is_error=True,
)
async def acall_tool(tool: BaseTool, arguments: dict) -> ToolOutput:
"""Call a tool with arguments asynchronously."""
async_tool = adapt_to_async_tool(tool)
try:
if (
len(tool.metadata.get_parameters_dict()["properties"]) == 1
and len(arguments) == 1
):
try:
single_arg = arguments[next(iter(arguments))]
return await async_tool.acall(single_arg)
except Exception:
# some tools will REQUIRE kwargs, so try it
return await async_tool.acall(**arguments)
else:
return await async_tool.acall(**arguments)
except Exception as e:
return ToolOutput(
content="Encountered error: " + str(e),
tool_name=tool.metadata.name,
raw_input=arguments,
raw_output=str(e),
is_error=True,
)
def call_tool_with_selection(
tool_call: ToolSelection,
tools: Sequence["BaseTool"],
verbose: bool = False,
) -> ToolOutput:
from llama_index.core.tools.calling import call_tool
tools_by_name = {tool.metadata.name: tool for tool in tools}
name = tool_call.tool_name
if verbose:
arguments_str = json.dumps(tool_call.tool_kwargs)
print("=== Calling Function ===")
print(f"Calling function: {name} with args: {arguments_str}")
tool = tools_by_name[name]
output = call_tool(tool, tool_call.tool_kwargs)
if verbose:
print("=== Function Output ===")
print(output.content)
return output
async def acall_tool_with_selection(
tool_call: ToolSelection,
tools: Sequence["BaseTool"],
verbose: bool = False,
) -> ToolOutput:
from llama_index.core.tools.calling import acall_tool
tools_by_name = {tool.metadata.name: tool for tool in tools}
name = tool_call.tool_name
if verbose:
arguments_str = json.dumps(tool_call.tool_kwargs)
print("=== Calling Function ===")
print(f"Calling function: {name} with args: {arguments_str}")
tool = tools_by_name[name]
output = await acall_tool(tool, tool_call.tool_kwargs)
if verbose:
print("=== Function Output ===")
print(output.content)
return output
|
"""CIFAR100 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar100.load_data")
def load_data(label_mode="fine"):
"""Loads the CIFAR100 dataset.
This is a dataset of 50,000 32x32 color training images and
10,000 test images, labeled over 100 fine-grained classes that are
grouped into 20 coarse-grained classes. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
Args:
label_mode: one of `"fine"`, `"coarse"`.
If it is `"fine"`, the category labels
are the fine-grained labels, and if it is `"coarse"`,
the output labels are the coarse-grained superclasses.
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **`x_train`**: `uint8` NumPy array of RGB image data with shape
`(50000, 32, 32, 3)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(50000, 1)` for the training data.
    **`x_test`**: `uint8` NumPy array of RGB image data with shape
`(10000, 32, 32, 3)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(10000, 1)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
```
"""
if label_mode not in ["fine", "coarse"]:
raise ValueError(
'`label_mode` must be one of `"fine"`, `"coarse"`. '
f"Received: label_mode={label_mode}."
)
dirname = "cifar-100-python-target"
origin = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
path = get_file(
fname=dirname,
origin=origin,
extract=True,
file_hash=( # noqa: E501
"85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
),
)
path = os.path.join(path, "cifar-100-python")
fpath = os.path.join(path, "train")
x_train, y_train = load_batch(fpath, label_key=label_mode + "_labels")
fpath = os.path.join(path, "test")
x_test, y_test = load_batch(fpath, label_key=label_mode + "_labels")
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if backend.image_data_format() == "channels_last":
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
|
"""CIFAR100 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar100.load_data")
def load_data(label_mode="fine"):
"""Loads the CIFAR100 dataset.
This is a dataset of 50,000 32x32 color training images and
10,000 test images, labeled over 100 fine-grained classes that are
grouped into 20 coarse-grained classes. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
Args:
label_mode: one of `"fine"`, `"coarse"`.
If it is `"fine"`, the category labels
are the fine-grained labels, and if it is `"coarse"`,
the output labels are the coarse-grained superclasses.
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **`x_train`**: `uint8` NumPy array of RGB image data with shape
`(50000, 32, 32, 3)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(50000, 1)` for the training data.
    **`x_test`**: `uint8` NumPy array of RGB image data with shape
`(10000, 32, 32, 3)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(10000, 1)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
```
"""
if label_mode not in ["fine", "coarse"]:
raise ValueError(
'`label_mode` must be one of `"fine"`, `"coarse"`. '
f"Received: label_mode={label_mode}."
)
dirname = "cifar-100-python"
origin = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
path = get_file(
fname=dirname,
origin=origin,
untar=True,
file_hash=( # noqa: E501
"85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
),
)
fpath = os.path.join(path, "train")
x_train, y_train = load_batch(fpath, label_key=label_mode + "_labels")
fpath = os.path.join(path, "test")
x_test, y_test = load_batch(fpath, label_key=label_mode + "_labels")
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if backend.image_data_format() == "channels_last":
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
|
import numpy as np
import pytest
import torch
from docarray.base_doc import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl, NdArray, TorchTensor
@pytest.fixture()
def doc_and_class():
class Mmdoc(BaseDoc):
img: NdArray
url: AnyUrl
txt: str
torch_tensor: TorchTensor
bytes_: bytes
doc = Mmdoc(
img=np.zeros((10)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(10),
bytes_=b'hello',
)
return doc, Mmdoc
def test_to_json(doc_and_class):
doc, _ = doc_and_class
doc.json()
def test_from_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(doc.json())
    for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
        if field in ['torch_tensor', 'img']:
            assert (getattr(doc, field) == getattr(new_doc, field2)).all()
        else:
            assert getattr(doc, field) == getattr(new_doc, field2)
def test_to_dict_to_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(orjson_dumps(doc.dict()))
    for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
        if field in ['torch_tensor', 'img']:
            assert (getattr(doc, field) == getattr(new_doc, field2)).all()
        else:
            assert getattr(doc, field) == getattr(new_doc, field2)
|
import numpy as np
import pytest
import torch
from docarray.base_document import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import AnyUrl, NdArray, TorchTensor
@pytest.fixture()
def doc_and_class():
class Mmdoc(BaseDocument):
img: NdArray
url: AnyUrl
txt: str
torch_tensor: TorchTensor
bytes_: bytes
doc = Mmdoc(
img=np.zeros((10)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(10),
bytes_=b'hello',
)
return doc, Mmdoc
def test_to_json(doc_and_class):
doc, _ = doc_and_class
doc.json()
def test_from_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(doc.json())
    for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
        if field in ['torch_tensor', 'img']:
            assert (getattr(doc, field) == getattr(new_doc, field2)).all()
        else:
            assert getattr(doc, field) == getattr(new_doc, field2)
def test_to_dict_to_json(doc_and_class):
doc, Mmdoc = doc_and_class
new_doc = Mmdoc.parse_raw(orjson_dumps(doc.dict()))
    for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
        if field in ['torch_tensor', 'img']:
            assert (getattr(doc, field) == getattr(new_doc, field2)).all()
        else:
            assert getattr(doc, field) == getattr(new_doc, field2)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
ignore_label=255,
loss_weight=0.2,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.ignore_label = ignore_label
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
x += self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
loss_semantic_seg *= self.loss_weight
return loss_semantic_seg
|
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
ignore_label=255,
loss_weight=0.2,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.ignore_label = ignore_label
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
x += self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
loss_semantic_seg *= self.loss_weight
return loss_semantic_seg
|
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_documents / 8) bytes, i.e. 1.19GB for 10 million embeddings.
"""
import json
import os
import time
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings
from datasets import load_dataset
import faiss
from usearch.index import Index
# We use usearch as it can efficiently load int8 vectors from disk.
# Load the model
# NOTE: Because we are only comparing questions here, we will use the "query" prompt for everything.
# Normally you don't use this prompt for documents, but only for the queries
model = SentenceTransformer(
"mixedbread-ai/mxbai-embed-large-v1",
prompts={"query": "Represent this sentence for searching relevant passages: "},
default_prompt_name="query",
)
# Load a corpus with texts
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# Apply some default query
query = "How do I become a good programmer?"
# Try to load the precomputed binary and int8 indices
if os.path.exists("quora_faiss_ubinary.index"):
binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary("quora_faiss_ubinary.index")
int8_view = Index.restore("quora_usearch_int8.index", view=True)
else:
# Encode the corpus using the full precision
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# Convert the embeddings to "ubinary" for efficient FAISS search
ubinary_embeddings = quantize_embeddings(full_corpus_embeddings, "ubinary")
binary_index = faiss.IndexBinaryFlat(1024)
binary_index.add(ubinary_embeddings)
faiss.write_index_binary(binary_index, "quora_faiss_ubinary.index")
# Convert the embeddings to "int8" for efficiently loading int8 indices with usearch
int8_embeddings = quantize_embeddings(full_corpus_embeddings, "int8")
index = Index(ndim=1024, metric="ip", dtype="i8")
index.add(np.arange(len(int8_embeddings)), int8_embeddings)
index.save("quora_usearch_int8.index")
del index
# Load the int8 index as a view, which does not cost any memory
int8_view = Index.restore("quora_usearch_int8.index", view=True)
def search(query, top_k: int = 10, rescore_multiplier: int = 4):
# 1. Embed the query as float32
start_time = time.time()
query_embedding = model.encode(query)
embed_time = time.time() - start_time
# 2. Quantize the query to ubinary
start_time = time.time()
query_embedding_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), "ubinary")
quantize_time = time.time() - start_time
# 3. Search the binary index
start_time = time.time()
_scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rescore_multiplier)
binary_ids = binary_ids[0]
search_time = time.time() - start_time
# 4. Load the corresponding int8 embeddings
start_time = time.time()
int8_embeddings = int8_view[binary_ids].astype(int)
load_time = time.time() - start_time
# 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
start_time = time.time()
scores = query_embedding @ int8_embeddings.T
rescore_time = time.time() - start_time
# 6. Sort the scores and return the top_k
start_time = time.time()
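    # Negating the scores makes argsort (ascending) return indices of the highest scores first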
indices = (-scores).argsort()[:top_k]
top_k_indices = binary_ids[indices]
top_k_scores = scores[indices]
sort_time = time.time() - start_time
return (
top_k_scores.tolist(),
top_k_indices.tolist(),
{
"Embed Time": f"{embed_time:.4f} s",
"Quantize Time": f"{quantize_time:.4f} s",
"Search Time": f"{search_time:.4f} s",
"Load Time": f"{load_time:.4f} s",
"Rescore Time": f"{rescore_time:.4f} s",
"Sort Time": f"{sort_time:.4f} s",
"Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s",
},
)
while True:
scores, indices, timings = search(query)
# Output the results
print(f"Timings:\n{json.dumps(timings, indent=2)}")
print(f"Query: {query}")
for score, index in zip(scores, indices):
print(f"(Score: {score:.4f}) {corpus[index]}")
print("")
# 10. Prompt for more queries
query = input("Please enter a question: ")
|
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_documents / 8) bytes, i.e. 1.19GB for 10 million embeddings.
"""
import json
import os
import time
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings
from datasets import load_dataset
import faiss
from usearch.index import Index
# We use usearch as it can efficiently load int8 vectors from disk.
# Load the model
# NOTE: Because we are only comparing questions here, we will use the "query" prompt for everything.
# Normally you don't use this prompt for documents, but only for the queries
model = SentenceTransformer(
"mixedbread-ai/mxbai-embed-large-v1",
prompts={"query": "Represent this sentence for searching relevant passages: "},
default_prompt_name="query",
)
# Load a corpus with texts
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# Apply some default query
query = "How do I become a good programmer?"
# Try to load the precomputed binary and int8 indices
if os.path.exists("quora_faiss_ubinary.index"):
binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary("quora_faiss_ubinary.index")
int8_view = Index.restore("quora_usearch_int8.index", view=True)
else:
# Encode the corpus using the full precision
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# Convert the embeddings to "ubinary" for efficient FAISS search
ubinary_embeddings = quantize_embeddings(full_corpus_embeddings, "ubinary")
binary_index = faiss.IndexBinaryFlat(1024)
binary_index.add(ubinary_embeddings)
faiss.write_index_binary(binary_index, "quora_faiss_ubinary.index")
# Convert the embeddings to "int8" for efficiently loading int8 indices with usearch
int8_embeddings = quantize_embeddings(full_corpus_embeddings, "int8")
index = Index(ndim=1024, metric="ip", dtype="i8")
index.add(np.arange(len(int8_embeddings)), int8_embeddings)
index.save("quora_usearch_int8.index")
del index
# Load the int8 index as a view, which does not cost any memory
int8_view = Index.restore("quora_usearch_int8.index", view=True)
def search(query, top_k: int = 10, rescore_multiplier: int = 4):
# 1. Embed the query as float32
start_time = time.time()
query_embedding = model.encode(query)
embed_time = time.time() - start_time
# 2. Quantize the query to ubinary
start_time = time.time()
query_embedding_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), "ubinary")
quantize_time = time.time() - start_time
# 3. Search the binary index
start_time = time.time()
_scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rescore_multiplier)
binary_ids = binary_ids[0]
search_time = time.time() - start_time
# 4. Load the corresponding int8 embeddings
start_time = time.time()
int8_embeddings = int8_view[binary_ids].astype(int)
load_time = time.time() - start_time
# 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
start_time = time.time()
scores = query_embedding @ int8_embeddings.T
rescore_time = time.time() - start_time
# 6. Sort the scores and return the top_k
start_time = time.time()
indices = scores.argsort()[:top_k]
top_k_indices = binary_ids[indices]
top_k_scores = scores[indices]
sort_time = time.time() - start_time
return (
top_k_scores.tolist(),
top_k_indices.tolist(),
{
"Embed Time": f"{embed_time:.4f} s",
"Quantize Time": f"{quantize_time:.4f} s",
"Search Time": f"{search_time:.4f} s",
"Load Time": f"{load_time:.4f} s",
"Rescore Time": f"{rescore_time:.4f} s",
"Sort Time": f"{sort_time:.4f} s",
"Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s",
},
)
while True:
scores, indices, timings = search(query)
# Output the results
print(f"Timings:\n{json.dumps(timings, indent=2)}")
print(f"Query: {query}")
for score, index in zip(scores, indices):
print(f"(Score: {score:.4f}) {corpus[index]}")
print("")
# 10. Prompt for more queries
query = input("Please enter a question: ")
|
__version__ = '0.14.8'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.7'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# flake8: noqa
from . import utils
from .utils import get_audio_backend, list_audio_backends, set_audio_backend
utils._init_audio_backend()
|
# flake8: noqa
from . import utils
from .utils import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
utils._init_audio_backend()
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 4
output_dir = "output/training_ce_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss; we use one that accepts pairs with a float similarity label between 0 and 1
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderCorrelationEvaluator to measure the performance on the dev set
eval_evaluator = CrossEncoderCorrelationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
scores=eval_dataset["score"],
name="stsb-validation",
)
eval_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-stsb"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=80,
save_strategy="steps",
save_steps=80,
save_total_limit=2,
logging_steps=20,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=eval_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_evaluator = CrossEncoderCorrelationEvaluator(
sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
scores=test_dataset["score"],
name="stsb-test",
)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 4
output_dir = "output/training_ce_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss; we use one that accepts pairs with a float similarity label between 0 and 1
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CECorrelationEvaluator to measure the performance on the dev set
eval_evaluator = CECorrelationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
scores=eval_dataset["score"],
name="stsb-validation",
)
eval_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-stsb"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=80,
save_strategy="steps",
save_steps=80,
save_total_limit=2,
logging_steps=20,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=eval_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_evaluator = CECorrelationEvaluator(
sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
scores=test_dataset["score"],
name="stsb-test",
)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
return losses
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
return losses
|
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl
@pytest.mark.proto
def test_proto_any_url():
uri = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyUrl)
def test_dump_json():
url = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.parametrize(
'relative_path',
[
'data/05978.jpg',
'../../data/05978.jpg',
],
)
def test_relative_path(relative_path):
# see issue: https://github.com/docarray/docarray/issues/978
url = parse_obj_as(AnyUrl, relative_path)
assert url == relative_path
def test_operators():
url = parse_obj_as(AnyUrl, 'data/05978.jpg')
assert url == 'data/05978.jpg'
assert url != 'aljdñjd'
assert 'data' in url
assert 'docarray' not in url
def test_get_url_extension():
# Test with a URL with extension
assert AnyUrl._get_url_extension('https://jina.ai/hey.md?model=gpt-4') == 'md'
assert AnyUrl._get_url_extension('https://jina.ai/text.txt') == 'txt'
assert AnyUrl._get_url_extension('bla.jpg') == 'jpg'
# Test with a URL without extension
assert not AnyUrl._get_url_extension('https://jina.ai')
assert not AnyUrl._get_url_extension('https://jina.ai/?model=gpt-4')
# Test with a text without extension
assert not AnyUrl._get_url_extension('some_text')
# Test with empty input
assert not AnyUrl._get_url_extension('')
|
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AnyUrl
@pytest.mark.proto
def test_proto_any_url():
uri = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
uri._to_node_protobuf()
def test_json_schema():
schema_json_of(AnyUrl)
def test_dump_json():
url = parse_obj_as(AnyUrl, 'http://jina.ai/img.png')
orjson_dumps(url)
@pytest.mark.parametrize(
'relative_path',
[
'data/05978.jpg',
'../../data/05978.jpg',
],
)
def test_relative_path(relative_path):
# see issue: https://github.com/docarray/docarray/issues/978
url = parse_obj_as(AnyUrl, relative_path)
assert url == relative_path
def test_operators():
url = parse_obj_as(AnyUrl, 'data/05978.jpg')
assert url == 'data/05978.jpg'
assert url != 'aljdñjd'
assert 'data' in url
assert 'docarray' not in url
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
import sys
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer(model_name)
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
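# Note (assumption about models.CNN internals): with 3 kernel sizes and 256 output channels
# each, the concatenated CNN output, and hence the pooled sentence embedding, is presumably
# 3 * 256 = 768-dimensional.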
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode="mean",
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="cnn", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-cnn")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-cnn')`."
)
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer("bert-base-uncased")
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
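# Note (assumption about models.CNN internals): the concatenated CNN output should be
# 3 * 256 = 768 dimensions, one 256-channel block per kernel size.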
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
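# Hedged usage sketch (not part of the original example): the reloaded model can also
# be used directly for inference; the sentences below are illustrative assumptions.
sentences = ["A man is playing a guitar.", "Someone plays an instrument."]
embeddings = model.encode(sentences)
# With out_channels=256 and three kernel sizes, the pooled sentence embedding should
# be 256 * 3 = 768-dimensional.
print(embeddings.shape)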
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
|
import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
|
from pathlib import Path
import click
from rich.console import Console
from rich.theme import Theme
from .pkg import pkg
from .test import test
LLAMA_DEV_THEME = Theme(
{
"repr.path": "",
"repr.filename": "",
"repr.str": "",
"traceback.note": "cyan",
"info": "dim cyan",
"warning": "magenta",
"error": "bold red",
}
)
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option()
@click.option(
"--repo-root",
default=".",
help="Path to the llama_index repository, defaults to '.'",
)
@click.option("--debug", is_flag=True, help="Enable verbose output.")
@click.pass_context
def cli(ctx, repo_root: str, debug: bool):
"""The official CLI for development, testing, and automation in the LlamaIndex monorepo."""
ctx.obj = {
"console": Console(theme=LLAMA_DEV_THEME, soft_wrap=True),
"repo_root": Path(repo_root).resolve(),
"debug": debug,
}
cli.add_command(pkg)
cli.add_command(test)
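# Hedged usage sketch (not part of the original module): the Click group can be invoked
# directly when this file is run as a script; the flags mirror the options defined above.
if __name__ == "__main__":
    cli()  # e.g. invoked as: <script> --repo-root /path/to/llama_index --debug pkg --help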
|
from pathlib import Path
import click
from rich.console import Console
from rich.theme import Theme
from .pkg import pkg
from .test import test
LLAMA_DEV_THEME = Theme(
{
"repr.path": "",
"repr.filename": "",
"repr.str": "",
"traceback.note": "cyan",
"info": "dim cyan",
"warning": "magenta",
"error": "bold red",
}
)
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option()
@click.option(
"--repo-root",
default=".",
help="Path to the llama_index repository, defaults to '.'",
)
@click.pass_context
def cli(ctx, repo_root: str):
"""The official CLI for development, testing, and automation in the LlamaIndex monorepo."""
ctx.obj = {
"console": Console(theme=LLAMA_DEV_THEME, soft_wrap=True),
"repo_root": Path(repo_root).resolve(),
}
cli.add_command(pkg)
cli.add_command(test)
|
"""Open Weather Map tool spec."""
from typing import Any, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class OpenWeatherMapToolSpec(BaseToolSpec):
"""Open Weather tool spec."""
spec_functions = ["weather_at_location", "forecast_tomorrow_at_location"]
def __init__(self, key: str, temp_units: str = "celsius") -> None:
"""Initialize with parameters."""
try:
from pyowm import OWM
except ImportError:
raise ImportError(
"The OpenWeatherMap tool requires the pyowm package to be installed. "
"Please install it using `pip install pyowm`."
)
self.key = key
self.temp_units = temp_units
self._owm = OWM(self.key)
self._mgr = self._owm.weather_manager()
def _format_temp(self, temperature: Any, temp_unit: str) -> str:
return (
f" - Current: {temperature['temp']}{temp_unit}\n"
f" - High: {temperature['temp_max']}{temp_unit}\n"
f" - Low: {temperature['temp_min']}{temp_unit}\n"
f" - Feels like: {temperature['feels_like']}{temp_unit}"
)
def _format_weather(
self, place: str, temp_str: str, w: Any, time_str: str = "now"
) -> str:
"""Format weather response from OpenWeatherMap.
Function thanks to
langchain/utilities/openweathermap.py
"""
detailed_status = w.detailed_status
wind = w.wind()
humidity = w.humidity
rain = w.rain
heat_index = w.heat_index
clouds = w.clouds
return (
f"In {place}, the weather for {time_str} is as follows:\n"
f"Detailed status: {detailed_status}\n"
f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°\n"
f"Humidity: {humidity}%\n"
"Temperature: \n"
f"{temp_str}\n"
f"Rain: {rain}\n"
f"Heat index: {heat_index!s}\n"
f"Cloud cover: {clouds}%"
)
def weather_at_location(self, location: str) -> List[Document]:
"""
Finds the current weather at a location.
Args:
place (str):
The place to find the weather at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
try:
observation = self._mgr.weather_at_place(location)
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
w = observation.weather
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w)
return [Document(text=weather_text, metadata={"weather from": location})]
def forecast_tomorrow_at_location(self, location: str) -> List[Document]:
"""
Finds the weather forecast for tomorrow at a location.
Args:
location (str):
The location to find the weather tomorrow at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
from pyowm.utils import timestamps
try:
forecast = self._mgr.forecast_at_place(location, "3h")
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
tomorrow = timestamps.tomorrow()
w = forecast.get_weather_at(tomorrow)
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w, "tomorrow")
return [
Document(
text=weather_text,
metadata={
"weather from": location,
"forecast for": tomorrow.strftime("%Y-%m-%d"),
},
)
]
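# Hedged usage sketch (not part of the tool spec): the API key and location are
# placeholder assumptions, and pyowm must be installed for this to run.
tool_spec = OpenWeatherMapToolSpec(key="YOUR_OPENWEATHERMAP_API_KEY")
docs = tool_spec.weather_at_location("Berlin, DE")
print(docs[0].text)
# For agent use, the spec functions can be exposed as individual tools,
# e.g. via tool_spec.to_tool_list().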
|
"""Open Weather Map tool spec."""
from typing import Any, List
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class OpenWeatherMapToolSpec(BaseToolSpec):
"""Open Weather tool spec."""
spec_functions = ["weather_at_location", "forecast_tommorrow_at_location"]
def __init__(self, key: str, temp_units: str = "celsius") -> None:
"""Initialize with parameters."""
try:
from pyowm import OWM
except ImportError:
raise ImportError(
"The OpenWeatherMap tool requires the pyowm package to be installed. "
"Please install it using `pip install pyowm`."
)
self.key = key
self.temp_units = temp_units
self._owm = OWM(self.key)
self._mgr = self._owm.weather_manager()
def _format_temp(self, temperature: Any, temp_unit: str) -> str:
return (
f" - Current: {temperature['temp']}{temp_unit}\n"
f" - High: {temperature['temp_max']}{temp_unit}\n"
f" - Low: {temperature['temp_min']}{temp_unit}\n"
f" - Feels like: {temperature['feels_like']}{temp_unit}"
)
def _format_weather(
self, place: str, temp_str: str, w: Any, time_str: str = "now"
) -> str:
"""Format weather response from OpenWeatherMap.
Function thanks to
langchain/utilities/openweathermap.py
"""
detailed_status = w.detailed_status
wind = w.wind()
humidity = w.humidity
rain = w.rain
heat_index = w.heat_index
clouds = w.clouds
return (
f"In {place}, the weather for {time_str} is as follows:\n"
f"Detailed status: {detailed_status}\n"
f"Wind speed: {wind['speed']} m/s, direction: {wind['deg']}°\n"
f"Humidity: {humidity}%\n"
"Temperature: \n"
f"{temp_str}\n"
f"Rain: {rain}\n"
f"Heat index: {heat_index!s}\n"
f"Cloud cover: {clouds}%"
)
def weather_at_location(self, location: str) -> List[Document]:
"""
Finds the current weather at a location.
Args:
place (str):
The place to find the weather at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
try:
observation = self._mgr.weather_at_place(location)
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
w = observation.weather
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w)
return [Document(text=weather_text, metadata={"weather from": location})]
def forecast_tommorrow_at_location(self, location: str) -> List[Document]:
"""
Finds the weather forecast for tomorrow at a location.
Args:
location (str):
The location to find the weather tomorrow at.
Should be a city name and country.
"""
from pyowm.commons.exceptions import NotFoundError
from pyowm.utils import timestamps
try:
forecast = self._mgr.forecast_at_place(location, "3h")
except NotFoundError:
return [Document(text=f"Unable to find weather at {location}.")]
tomorrow = timestamps.tomorrow()
w = forecast.get_weather_at(tomorrow)
temperature = w.temperature(self.temp_units)
temp_unit = "°C" if self.temp_units == "celsius" else "°F"
temp_str = self._format_temp(temperature, temp_unit)
weather_text = self._format_weather(location, temp_str, w, "tomorrow")
return [
Document(
text=weather_text,
metadata={
"weather from": location,
"forecast for": tomorrow.strftime("%Y-%m-%d"),
},
)
]
|
import asyncio
import sys
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
def test_smoke():
"""No-op test: CI will fail if no tests are collected."""
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher"
)
@pytest.mark.asyncio
async def test_add_data(monkeypatch):
# Instantiate cognee GraphRAG
cogneeGraphRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
async def mock_add_return(add, dataset_name):
return True
import cognee
monkeypatch.setattr(cognee, "add", mock_add_return)
# Gather documents to add to GraphRAG
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
await cogneeGraphRAG.add(documents, "test")
await cogneeGraphRAG.add(documents[0], "test")
if __name__ == "__main__":
asyncio.run(test_add_data())
|
import asyncio
import sys
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
def test_smoke():
"""No-op test: CI will fail if no tests are collected."""
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher"
)
@pytest.mark.asyncio()
async def test_add_data(monkeypatch):
# Instantiate cognee GraphRAG
cogneeGraphRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
async def mock_add_return(add, dataset_name):
return True
import cognee
monkeypatch.setattr(cognee, "add", mock_add_return)
# Gather documents to add to GraphRAG
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
await cogneeGraphRAG.add(documents, "test")
await cogneeGraphRAG.add(documents[0], "test")
if __name__ == "__main__":
asyncio.run(test_add_data())
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import StarRocks
from langchain_community.vectorstores.starrocks import StarRocksSettings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"StarRocksSettings": "langchain_community.vectorstores.starrocks",
"StarRocks": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"StarRocks",
"StarRocksSettings",
]
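# Hedged usage sketch (module path assumed, e.g. langchain.vectorstores.starrocks):
# importing a deprecated name resolves through __getattr__ above and emits a
# deprecation warning pointing at the langchain_community location.
# from langchain.vectorstores.starrocks import StarRocksSettings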
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import StarRocks
from langchain_community.vectorstores.starrocks import StarRocksSettings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"StarRocksSettings": "langchain_community.vectorstores.starrocks",
"StarRocks": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"StarRocksSettings",
"StarRocks",
]
|
import aiohttp
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.excepts import BadServer
from jina.logging.logger import JinaLogger
from jina.types.request.data import DataRequest
logger = JinaLogger('clientlet')
class ClientTestExecutor(Executor):
@requests
def error(self, **kwargs):
raise NotImplementedError
@pytest.fixture
def flow_with_exception_request():
return Flow().add(uses=ClientTestExecutor).add()
@pytest.mark.asyncio
async def test_http_clientlet():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', None, {'a': 'b'})
r = await iolet.send_message(request)
response = DataRequest(await r.json())
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_websocket_clientlet():
with pytest.raises(aiohttp.ClientError):
async with WebsocketClientlet(
url='ws://localhost:12345', logger=logger
) as iolet:
pass
def test_client_behaviour(flow_with_exception_request, mocker):
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
on_error_mock = None
with pytest.raises(BadServer):
with flow_with_exception_request as f:
f.post(
'',
on_done=on_done_mock,
on_error=on_error_mock,
on_always=on_always_mock,
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
on_error_mock = mocker.Mock()
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
with flow_with_exception_request as f:
f.post(
'', on_done=on_done_mock, on_error=on_error_mock, on_always=on_always_mock
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
on_error_mock.assert_called_once()
|
import aiohttp
import pytest
from jina import Executor, Flow, requests
from jina.clients.base.helper import HTTPClientlet, WebsocketClientlet
from jina.clients.request.helper import _new_data_request
from jina.logging.logger import JinaLogger
from jina.types.request.data import DataRequest
logger = JinaLogger('clientlet')
class ClientTestExecutor(Executor):
@requests
def error(self, **kwargs):
raise NotImplementedError
@pytest.fixture
def flow_with_exception_request():
return Flow().add(uses=ClientTestExecutor).add()
@pytest.mark.asyncio
async def test_http_clientlet():
from jina.helper import random_port
port = random_port()
with Flow(port=port, protocol='http').add():
async with HTTPClientlet(
url=f'http://localhost:{port}/post', logger=logger
) as iolet:
request = _new_data_request('/', None, {'a': 'b'})
r = await iolet.send_message(request)
response = DataRequest(await r.json())
assert response.header.exec_endpoint == '/'
assert response.parameters == {'a': 'b'}
@pytest.mark.asyncio
async def test_websocket_clientlet():
with pytest.raises(aiohttp.ClientError):
async with WebsocketClientlet(
url='ws://localhost:12345', logger=logger
) as iolet:
pass
def test_client_behaviour(flow_with_exception_request, mocker):
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
on_error_mock = None
with flow_with_exception_request as f:
f.post(
'', on_done=on_done_mock, on_error=on_error_mock, on_always=on_always_mock
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
on_error_mock = mocker.Mock()
on_done_mock = mocker.Mock()
on_always_mock = mocker.Mock()
with flow_with_exception_request as f:
f.post(
'', on_done=on_done_mock, on_error=on_error_mock, on_always=on_always_mock
)
on_always_mock.assert_called_once()
on_done_mock.assert_not_called()
on_error_mock.assert_called_once()
|
import json
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import ConfigDict
from langchain_community.utilities.graphql import GraphQLAPIWrapper
class BaseGraphQLTool(BaseTool):
"""Base tool for querying a GraphQL API."""
graphql_wrapper: GraphQLAPIWrapper
name: str = "query_graphql"
description: str = """\
Input to this tool is a detailed and correct GraphQL query, output is a result from the API.
If the query is not correct, an error message will be returned.
If an error is returned with 'Bad request' in it, rewrite the query and try again.
If an error is returned with 'Unauthorized' in it, do not try again, but tell the user to change their authentication.
Example Input: query {{ allUsers {{ id, name, email }} }}\
""" # noqa: E501
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _run(
self,
tool_input: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
result = self.graphql_wrapper.run(tool_input)
return json.dumps(result, indent=2)
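# Hedged usage sketch (not part of this module): the endpoint URL is a placeholder
# assumption, and GraphQLAPIWrapper requires the `gql` package to be installed.
wrapper = GraphQLAPIWrapper(graphql_endpoint="https://example.com/graphql")
graphql_tool = BaseGraphQLTool(graphql_wrapper=wrapper)
print(graphql_tool.run("query { allUsers { id, name, email } }"))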
|
import json
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import ConfigDict
from langchain_community.utilities.graphql import GraphQLAPIWrapper
class BaseGraphQLTool(BaseTool): # type: ignore[override]
"""Base tool for querying a GraphQL API."""
graphql_wrapper: GraphQLAPIWrapper
name: str = "query_graphql"
description: str = """\
Input to this tool is a detailed and correct GraphQL query, output is a result from the API.
If the query is not correct, an error message will be returned.
If an error is returned with 'Bad request' in it, rewrite the query and try again.
If an error is returned with 'Unauthorized' in it, do not try again, but tell the user to change their authentication.
Example Input: query {{ allUsers {{ id, name, email }} }}\
""" # noqa: E501
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def _run(
self,
tool_input: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
result = self.graphql_wrapper.run(tool_input)
return json.dumps(result, indent=2)
|
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
if val in trues:
return True
if val not in falses:
# fmt: off
raise RuntimeError(
f"Unexpected environment variable value `{var}={val}`. "
f"Expected one of {trues + falses}")
# fmt: on
return False
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
UNSUPPORTED = []
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
remove (bool): If enabled, append future removal message.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
message = "This function has been deprecated. "
if remove:
message += f'It will be removed from {"future" if version is None else version} release. '
wrapped.__doc__ = f"""DEPRECATED: {func.__doc__}
.. warning::
{message}
{direction}
"""
UNSUPPORTED.append(wrapped)
return wrapped
return decorator
dropping_support = deprecated(
"As TorchAudio is no longer being actively developed, this function can no longer be supported."
"See https://github.com/pytorch/audio/issues/3902 for more details.", version="2.9", remove=True)
dropping_io_support = deprecated(
"This functionality has been superseded by `AudioDecoder` from the TorchCodec library."
"See https://github.com/pytorch/audio/issues/3902 for more details.", version="2.9", remove=True)
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
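# Hedged usage sketch (not part of this module): how the decorators above are typically
# applied; the module name and functions below are illustrative assumptions.
@requires_module("soundfile")
def _load_with_soundfile(path):
    import soundfile
    return soundfile.read(path)
@deprecated("Use the new I/O utilities instead.", version="2.9", remove=True)
def _legacy_load(path):
    """Load an audio file the old way."""
    return _load_with_soundfile(path)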
|
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
if val in trues:
return True
if val not in falses:
# fmt: off
raise RuntimeError(
f"Unexpected environment variable value `{var}={val}`. "
f"Expected one of {trues + falses}")
# fmt: on
return False
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
UNSUPPORTED = []
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or int): The version when the object will be removed
remove (bool): If enabled, append future removal message.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
message = "This function has been deprecated. "
if remove:
message += f'It will be removed from {"future" if version is None else version} release. '
wrapped.__doc__ = f"""DEPRECATED: {func.__doc__}
.. warning::
{message}
{direction}
"""
UNSUPPORTED.append(wrapped)
return wrapped
return decorator
dropping_support = deprecated(
"As TorchAudio is no longer being actively developed, this function can no longer be supported."
"See https://github.com/pytorch/audio/issues/3902 for more details.", version="2.9", remove=True)
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/openimages_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=601))
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=26000,
warmup_ratio=1.0 / 64,
step=[8, 11])
|
from unittest.mock import MagicMock
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.tools import FunctionTool
from llama_index.llms.oci_genai import OCIGenAI
def test_oci_genai_embedding_class():
names_of_base_classes = [b.__name__ for b in OCIGenAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
# Shared test tool for tool_required tests
def search(query: str) -> str:
"""Search for information about a query."""
return f"Results for {query}"
search_tool = FunctionTool.from_defaults(
fn=search, name="search_tool", description="A tool for searching information"
)
def test_prepare_chat_with_tools_tool_required():
"""Test that tool_required is correctly passed to the API request when True."""
# Mock the client to avoid authentication issues
mock_client = MagicMock()
llm = OCIGenAI(
model="cohere.command-r-16k",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="test_compartment_id",
client=mock_client,
)
# Test with tool_required=True
result = llm._prepare_chat_with_tools(
tools=[search_tool], user_msg="Test message", tool_required=True
)
assert result["tool_choice"] == "REQUIRED"
assert len(result["tools"]) == 1
# CohereTool objects have a `name` attribute directly
assert result["tools"][0].name == "search_tool"
def test_prepare_chat_with_tools_tool_not_required():
"""Test that tool_required is correctly passed to the API request when False."""
# Mock the client to avoid authentication issues
mock_client = MagicMock()
llm = OCIGenAI(
model="cohere.command-r-16k",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="test_compartment_id",
client=mock_client,
)
# Test with tool_required=False (default)
result = llm._prepare_chat_with_tools(
tools=[search_tool],
user_msg="Test message",
)
# When tool_required is False, tool_choice should not be included
assert "tool_choice" not in result
assert len(result["tools"]) == 1
# CohereTool objects have a `name` attribute directly
assert result["tools"][0].name == "search_tool"
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.oci_genai import OCIGenAI
def test_oci_genai_embedding_class():
names_of_base_classes = [b.__name__ for b in OCIGenAI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
_base_ = 'retinanet_r50_fpn_1x_coco.py'
# training schedule for 90k
train_cfg = dict(by_epoch=False, max_iters=90000)
val_cfg = dict(interval=10000)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[60000, 80000],
gamma=0.1)
]
default_hooks = dict(checkpoint=dict(interval=10000))
|
_base_ = 'retinanet_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
|
"""Tool for the SearchApi.io search API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
class SearchAPIRun(BaseTool):
"""Tool that queries the SearchApi.io search API."""
name: str = "searchapi"
description: str = (
"Google search API provided by SearchApi.io."
"This tool is handy when you need to answer questions about current events."
"Input should be a search query."
)
api_wrapper: SearchApiAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return await self.api_wrapper.arun(query)
class SearchAPIResults(BaseTool):
"""Tool that queries the SearchApi.io search API and returns JSON."""
name: str = "searchapi_results_json"
description: str = (
"Google search API provided by SearchApi.io."
"This tool is handy when you need to answer questions about current events."
"The input should be a search query and the output is a JSON object "
"with the query results."
)
api_wrapper: SearchApiAPIWrapper = Field(default_factory=SearchApiAPIWrapper)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.results(query))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (await self.api_wrapper.aresults(query)).__str__()
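# Hedged usage sketch (not part of this module): SearchApiAPIWrapper is assumed to read
# its API key from the SEARCHAPI_API_KEY environment variable; the query is illustrative.
search_tool = SearchAPIRun(api_wrapper=SearchApiAPIWrapper())
print(search_tool.run("latest Python release"))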
|
"""Tool for the SearchApi.io search API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
class SearchAPIRun(BaseTool): # type: ignore[override]
"""Tool that queries the SearchApi.io search API."""
name: str = "searchapi"
description: str = (
"Google search API provided by SearchApi.io."
"This tool is handy when you need to answer questions about current events."
"Input should be a search query."
)
api_wrapper: SearchApiAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return await self.api_wrapper.arun(query)
class SearchAPIResults(BaseTool): # type: ignore[override]
"""Tool that queries the SearchApi.io search API and returns JSON."""
name: str = "searchapi_results_json"
description: str = (
"Google search API provided by SearchApi.io."
"This tool is handy when you need to answer questions about current events."
"The input should be a search query and the output is a JSON object "
"with the query results."
)
api_wrapper: SearchApiAPIWrapper = Field(default_factory=SearchApiAPIWrapper)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return str(self.api_wrapper.results(query))
async def _arun(
self,
query: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
return (await self.api_wrapper.aresults(query)).__str__()
|