input | output
---|---
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TEXT_MIMETYPE = 'text'
AUDIO_MIMETYPE = 'audio'
IMAGE_MIMETYPE = 'image'
OBJ_MIMETYPE = 'application/x-tgif'
VIDEO_MIMETYPE = 'video'
MESH_EXTRA_EXTENSIONS = [
'3ds',
'3mf',
'ac',
'ac3d',
'amf',
'assimp',
'bvh',
'cob',
'collada',
'ctm',
'dxf',
'e57',
'fbx',
'gltf',
'glb',
'ifc',
'lwo',
'lws',
'lxo',
'md2',
'md3',
'md5',
'mdc',
'm3d',
'mdl',
'ms3d',
'nff',
'obj',
'off',
'pcd',
'pod',
'pmd',
'pmx',
'ply',
'q3o',
'q3s',
'raw',
'sib',
'smd',
'stl',
'ter',
'terragen',
'vtk',
'vrml',
'x3d',
'xaml',
'xgl',
'xml',
'xyz',
'zgl',
'vta',
]
TEXT_EXTRA_EXTENSIONS = ['md', 'log']
POINT_CLOUD_EXTRA_EXTENSIONS = [
'ascii',
'bin',
'b3dm',
'bpf',
'dp',
'dxf',
'e57',
'fls',
'glb',
'ply',
'gpf',
'las',
'obj',
'osgb',
'pcap',
'pcd',
'pdal',
'pfm',
'ply',
'ply2',
'pod',
'pods',
'pnts',
'ptg',
'ptx',
'pts',
'rcp',
'xyz',
'zfs',
]
|
TEXT_MIMETYPE = 'text'
AUDIO_MIMETYPE = 'audio'
IMAGE_MIMETYPE = 'image'
OBJ_MIMETYPE = 'application/x-tgif'
VIDEO_MIMETYPE = 'video'
MESH_EXTRA_EXTENSIONS = [
'3ds',
'3mf',
'ac',
'ac3d',
'amf',
'assimp',
'bvh',
'cob',
'collada',
'ctm',
'dxf',
'e57',
'fbx',
'gltf',
'glb',
'ifc',
'lwo',
'lws',
'lxo',
'md2',
'md3',
'md5',
'mdc',
'm3d',
'mdl',
'ms3d',
'nff',
'obj',
'off',
'pcd',
'pod',
'pmd',
'pmx',
'ply',
'q3o',
'q3s',
'raw',
'sib',
'smd',
'stl',
'ter',
'terragen',
'vtk',
'vrml',
'x3d',
'xaml',
'xgl',
'xml',
'xyz',
'zgl',
'vta',
]
TEXT_EXTRA_EXTENSIONS = ['md', 'log']
POINT_CLOUD_EXTRA_EXTENSIONS = [
'ascii',
'bin',
'b3dm',
'bpf',
'dp',
'dxf',
'e57',
'fls',
'glb',
'ply',
'gpf',
'las',
'obj',
'osgb',
'pcap',
'pcd',
'pdal',
'pfm',
'ply',
'ply2',
'pod',
'pods',
'pnts',
'ptg',
'ptx',
'pts',
'rcp',
'xyz',
'zfs',
]
|
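The row above defines coarse media-type constants plus extra extension lists for mesh, text, and point-cloud files. A minimal sketch of how such lists might be consumed, assuming the constants above are importable; the helper name, the bucket mapping, and the fallback are illustrative, not part of the original module:

```python
import os

# Hypothetical helper: bucket a filename by extension using the lists above.
# Mapping mesh/point-cloud extensions to OBJ_MIMETYPE is an assumption.
def guess_bucket(filename: str) -> str:
    ext = os.path.splitext(filename)[1].lstrip('.').lower()
    if ext in MESH_EXTRA_EXTENSIONS or ext in POINT_CLOUD_EXTRA_EXTENSIONS:
        return OBJ_MIMETYPE
    if ext in TEXT_EXTRA_EXTENSIONS:
        return TEXT_MIMETYPE
    return TEXT_MIMETYPE  # fallback; real code would consult a full mimetype table


print(guess_bucket('bunny.obj'))  # 'application/x-tgif'
print(guess_bucket('notes.md'))   # 'text'
```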
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = SqlDatasetReader(
"dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
).read()
_check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
_check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
with pytest.raises(ValueError):
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = SqlDatasetReader(
"dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
).read()
_check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features",
[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
_check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
original_sql = iter_sql_file(sqlite_path)
expected_sql = iter_sql_file(output_sqlite_path)
for row1, row2 in zip(original_sql, expected_sql):
assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
cache_dir = tmp_path / "cache"
output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
with pytest.raises(ValueError):
SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
|
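These tests assume a `sqlite_path` fixture pointing at a SQLite file that contains a `dataset` table with four rows and columns `col_1` (string), `col_2` (int64), `col_3` (float64). The fixture itself is not shown; a hedged sketch of what such a conftest fixture could look like:

```python
import contextlib
import sqlite3

import pytest


@pytest.fixture
def sqlite_path(tmp_path):
    # Build a small SQLite database shaped like what _check_sql_dataset expects:
    # table "dataset", 4 rows, columns col_1 (text), col_2 (int), col_3 (real).
    path = str(tmp_path / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        con.executemany(
            "INSERT INTO dataset VALUES (?, ?, ?)",
            [("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0)],
        )
        con.commit()
    return path
```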
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BS4HTMLParser": "langchain_community.document_loaders.parsers.html.bs4",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BS4HTMLParser",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.html.bs4 import BS4HTMLParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BS4HTMLParser": "langchain_community.document_loaders.parsers.html.bs4"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BS4HTMLParser",
]
|
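Both variants rely on module-level `__getattr__` (PEP 562) so that importing `BS4HTMLParser` from the old location still resolves, with `create_importer` centralizing the deprecation warning. A stripped-down sketch of the same pattern without the LangChain helper; the module names and warning text are illustrative:

```python
# old_package/parsers.py -- illustrative module showing the PEP 562 redirect.
import importlib
import warnings
from typing import Any

_MOVED = {"BS4HTMLParser": "new_package.parsers.bs4"}  # hypothetical new home


def __getattr__(name: str) -> Any:
    if name in _MOVED:
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {_MOVED[name]} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        module = importlib.import_module(_MOVED[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```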
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pytest
from mmengine import Config, DefaultScope
from mmengine.hub import get_config, get_model
from mmengine.utils import get_installed_path, is_installed
data_path = osp.join(osp.dirname(osp.dirname(__file__)), 'data/')
# mmdet has a more typical config structure, while mmpose has a complex
# config structure
@pytest.mark.skipif(
not (is_installed('mmdet') and is_installed('mmpose')),
reason='mmdet and mmpose should be installed')
def test_get_config():
# Test load base config.
base_cfg = get_config('mmdet::_base_/models/faster-rcnn_r50_fpn.py')
package_path = get_installed_path('mmdet')
test_base_cfg = Config.fromfile(
osp.join(package_path, '.mim',
'configs/_base_/models/faster-rcnn_r50_fpn.py'))
assert test_base_cfg._cfg_dict == base_cfg._cfg_dict
# Test load faster_rcnn config
cfg = get_config('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
test_cfg = Config.fromfile(
osp.join(package_path, '.mim',
'configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'))
assert cfg._cfg_dict == test_cfg._cfg_dict
# Test pretrained
cfg = get_config(
'mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
assert cfg.model_path == 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa E301
# Test load mmpose
get_config(
'mmpose::face_2d_keypoint/topdown_heatmap/wflw/td-hm_hrnetv2-w18_8xb64-60e_wflw-256x256.py' # noqa E501
)
@pytest.mark.skipif(
not is_installed('mmdet'), reason='mmdet and mmpose should be installed')
def test_get_model():
# TODO compatible with downstream codebase.
DefaultScope.get_instance('test_get_model', scope_name='test_scope')
get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
assert DefaultScope.get_current_instance().scope_name == 'test_scope'
DefaultScope._instance_dict.pop('test_get_model')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pytest
from mmengine import Config, DefaultScope
from mmengine.hub import get_config, get_model
from mmengine.utils import get_installed_path, is_installed
data_path = osp.join(osp.dirname(osp.dirname(__file__)), 'data/')
# mmdet has a more typical config structure, while mmpose has a complex
# config structure
@pytest.mark.skipif(
not (is_installed('mmdet') and is_installed('mmpose')),
reason='mmdet and mmpose should be installed')
def test_get_config():
# Test load base config.
base_cfg = get_config('mmdet::_base_/models/faster-rcnn_r50_fpn.py')
package_path = get_installed_path('mmdet')
test_base_cfg = Config.fromfile(
osp.join(package_path, '.mim',
'configs/_base_/models/faster-rcnn_r50_fpn.py'))
assert test_base_cfg._cfg_dict == base_cfg._cfg_dict
# Test load faster_rcnn config
cfg = get_config('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
test_cfg = Config.fromfile(
osp.join(package_path, '.mim',
'configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'))
assert cfg._cfg_dict == test_cfg._cfg_dict
# Test pretrained
cfg = get_config(
'mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
assert cfg.model_path == 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa E301
# Test load mmpose
get_config(
'mmpose::face/2d_kpt_sview_rgb_img/deeppose/wflw/res50_wflw_256x256'
'.py')
@pytest.mark.skipif(
not is_installed('mmdet'), reason='mmdet and mmpose should be installed')
def test_get_model():
# TODO compatible with downstream codebase.
DefaultScope.get_instance('test_get_model', scope_name='test_scope')
get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
assert DefaultScope.get_current_instance().scope_name == 'test_scope'
DefaultScope._instance_dict.pop('test_get_model')
|
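The `scope::relative/path.py` strings in these tests resolve configs shipped inside an installed OpenMMLab package (under its `.mim` directory). A hedged usage sketch, assuming `mmdet` is installed; the config path is the one already exercised above:

```python
from mmengine.hub import get_config, get_model

# Load a config bundled with the installed mmdet package. With pretrained=True,
# the URL of the matching checkpoint is attached as cfg.model_path.
cfg = get_config('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
print(cfg.model_path)

# Build the model described by that config (no weights are downloaded here).
model = get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py')
```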
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='InstaBoost',
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 48
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[32, 44],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
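The `{{_base_.backend_args}}` / `{{_base_.file_client_args}}` placeholders reference a variable defined in the inherited `_base_` config and are substituted by MMEngine when the file is parsed. A minimal sketch of the mechanism; the file names and the variable value are assumptions:

```python
# Suppose an illustrative base_cfg.py defines:
#
#     backend_args = None
#
# and an illustrative child_cfg.py inherits it and splices the value in:
#
#     _base_ = './base_cfg.py'
#     train_pipeline = [
#         dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
#     ]
#
# Parsing the child config resolves the placeholder against the base file:
from mmengine import Config

cfg = Config.fromfile('child_cfg.py')         # hypothetical path
print(cfg.train_pipeline[0]['backend_args'])  # -> None, taken from base_cfg.py
```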
from typing import List, Optional
import torchaudio
from torchaudio._internal.module_utils import deprecated
from . import utils
# TODO: Once legacy global backend is removed, move this to torchaudio.__init__
def _init_backend():
torchaudio.info = utils.get_info_func()
torchaudio.load = utils.get_load_func()
torchaudio.save = utils.get_save_func()
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
list of str: The list of available backends.
The possible values are:
- Dispatcher mode: ``"ffmpeg"``, ``"sox"`` and ``"soundfile"``.
- Legacy backend mode: ``"sox_io"``, ``"soundfile"``.
"""
return list(utils.get_available_backends().keys())
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def get_audio_backend() -> Optional[str]:
"""Get the name of the current global backend
Returns:
str or None:
If dispatcher mode is enabled, returns ``None`` otherwise,
the name of current backend or ``None`` (no backend is set).
"""
return None
# Temporary until global backend is removed
@deprecated("With dispatcher enabled, this function is no-op. You can remove the function call.")
def set_audio_backend(backend: Optional[str]): # noqa
"""Set the global backend.
This is a no-op when dispatcher mode is enabled.
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
pass
|
import warnings
from typing import List, Optional
import torchaudio
from . import utils
# TODO: Once legacy global backend is removed, move this to torchaudio.__init__
def _init_backend():
torchaudio.info = utils.get_info_func()
torchaudio.load = utils.get_load_func()
torchaudio.save = utils.get_save_func()
def list_audio_backends() -> List[str]:
return list(utils.get_available_backends().keys())
# Temporary until global backend is removed
def get_audio_backend() -> Optional[str]:
warnings.warn("I/O Dispatcher is enabled. There is no global audio backend.", stacklevel=2)
return None
# Temporary until global backend is removed
def set_audio_backend(_: Optional[str]):
warnings.warn("I/O Dispatcher is enabled. set_audio_backend is a no-op", stacklevel=2)
|
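Both variants keep `list_audio_backends()` while backend selection happens per call through the dispatcher. A hedged usage sketch for recent torchaudio releases, assuming at least one backend (e.g. soundfile) is installed and `audio.wav` exists:

```python
import torchaudio

# Backends detected at import time, e.g. ['ffmpeg', 'sox', 'soundfile'].
print(torchaudio.list_audio_backends())

# The dispatcher picks a backend per call; it can also be pinned explicitly.
waveform, sample_rate = torchaudio.load("audio.wav")                       # auto-select
waveform, sample_rate = torchaudio.load("audio.wav", backend="soundfile")  # pinned
```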
from __future__ import annotations
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
@dataclass
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the two-text-column dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "labels", "score", "scores"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
# If the label column is a list/tuple/collection, we create a list of tensors
if isinstance(features[0][label_column], Collection):
batch["label"] = [torch.tensor(row[label_column]) for row in features]
else:
# Otherwise, if it's e.g. single values, we create a tensor
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
batch[column_name] = [row[column_name] for row in features]
return batch
|
from __future__ import annotations
from dataclasses import field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
This works with the two-text-column dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "score"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# TODO:
# if tuple(column_names) not in self._warned_columns:
# self.maybe_warn_about_column_order(column_names)
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
# tokenized = self.tokenize_fn([row[column_name] for row in features])
# for key, value in tokenized.items():
# batch[f"{column_name}_{key}"] = value
batch[column_name] = [row[column_name] for row in features]
return batch
|
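Both collator variants tensorize only the label column (and any `*_prompt_length` columns) and pass the raw text columns through as lists, leaving tokenization to the model. A small sketch of that behaviour for scalar labels, consistent with either variant; the feature dicts are made up and `tokenize_fn` is inert here:

```python
# Assumes the CrossEncoderDataCollator defined above is importable.
features = [
    {"query": "how tall is the eiffel tower",
     "passage": "The Eiffel Tower is 330 m tall.", "label": 1.0},
    {"query": "how tall is the eiffel tower",
     "passage": "Paris is the capital of France.", "label": 0.0},
]

collator = CrossEncoderDataCollator(tokenize_fn=lambda texts: {})  # tokenize_fn unused here
batch = collator(features)

print(batch["label"])    # tensor([1., 0.])
print(batch["query"])    # list of raw strings
print(batch["passage"])  # list of raw strings
```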
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.ReciprocalRankFusionEvaluator import (
ReciprocalRankFusionEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseRerankingEvaluator import (
SparseRerankingEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"ReciprocalRankFusionEvaluator",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseRerankingEvaluator import (
SparseRerankingEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
]
|
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointStruct,
VectorParams,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.upsert(
collection_name=self.collection_name,
points=batch,
wait=True,
)
batch = []
if len(batch) > 0:
self.client.upsert(
collection_name=self.collection_name,
wait=True,
points=batch,
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
extra_columns = {
col: doc.tags.get(col) for col, _ in self._config.columns.items()
}
return PointStruct(
id=self._map_id(doc.id),
payload=dict(
_serialized=doc.to_base64(**self.serialization_config), **extra_columns
),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.retrieve(
collection_name=self.collection_name, ids=[self._map_id(_id)]
)
if len(resp) == 0:
raise KeyError(_id)
return self._qdrant_to_document(resp[0].payload)
except UnexpectedResponse as response_error:
if response_error.status_code in [404, 400]:
raise KeyError(_id)
def _del_doc_by_id(self, _id: str):
self.client.delete(
collection_name=self.collection_name,
points_selector=PointIdsList(points=[self._map_id(_id)]),
wait=True,
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.upsert(
collection_name=self.collection_name,
wait=True,
points=[self._document_to_qdrant(value)],
)
def scan(self) -> Iterator['Document']:
offset = None
while True:
response, next_page = self.client.scroll(
collection_name=self.collection_name,
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vectors=False,
)
for point in response:
yield self._qdrant_to_document(point.payload)
if next_page:
offset = next_page
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self.client.recreate_collection(
self.collection_name,
vectors_config=VectorParams(
size=self.n_dim,
distance=self.distance,
),
)
|
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
batch = []
if len(batch) > 0:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
extra_columns = {
col: doc.tags.get(col) for col, _ in self._config.columns.items()
}
return PointStruct(
id=self._map_id(doc.id),
payload=dict(
_serialized=doc.to_base64(**self.serialization_config), **extra_columns
),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.http.points_api.get_point(
collection_name=self.collection_name, id=self._map_id(_id)
)
return self._qdrant_to_document(resp.result.payload)
except UnexpectedResponse as response_error:
if response_error.status_code in [404, 400]:
raise KeyError(_id)
def _del_doc_by_id(self, _id: str):
self.client.http.points_api.delete_points(
collection_name=self.collection_name,
wait=True,
points_selector=PointIdsList(points=[self._map_id(_id)]),
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(
points=[self._document_to_qdrant(value)]
),
)
def scan(self) -> Iterator['Document']:
offset = None
while True:
response = self.client.http.points_api.scroll_points(
collection_name=self.collection_name,
scroll_request=ScrollRequest(
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vector=False,
),
)
for point in response.result.points:
yield self._qdrant_to_document(point.payload)
if response.result.next_page_offset:
offset = response.result.next_page_offset
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.recreate_collection(
self.collection_name,
vector_size=self.n_dim,
distance=self.distance,
)
|
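One variant above goes through the low-level `client.http.points_api`, the other through the high-level `QdrantClient` methods (`upsert`, `retrieve`, `scroll`, `delete`). A hedged round-trip sketch of the high-level API against an embedded in-memory instance, independent of the docarray mixin; ids, vectors, and payloads are illustrative:

```python
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, PointStruct, VectorParams

client = QdrantClient(":memory:")  # embedded local mode, no server needed
client.recreate_collection(
    collection_name="docs",
    vectors_config=VectorParams(size=3, distance=Distance.COSINE),
)

client.upsert(
    collection_name="docs",
    wait=True,
    points=[PointStruct(id=1, vector=[0.1, 0.2, 0.3], payload={"_serialized": "..."})],
)

points = client.retrieve(collection_name="docs", ids=[1])
print(points[0].payload)

page, next_offset = client.scroll(collection_name="docs", limit=10, with_payload=True)
print(len(page), next_offset)
```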
"""OpenAI Finetuning."""
import logging
import os
import time
from typing import Any, Optional
import openai
from openai import OpenAI as SyncOpenAI
from openai.types.fine_tuning import FineTuningJob
from llama_index.core.llms.llm import LLM
from llama_index.finetuning.callbacks.finetuning_handler import OpenAIFineTuningHandler
from llama_index.finetuning.openai.validate_json import validate_json
from llama_index.finetuning.types import BaseLLMFinetuneEngine
from llama_index.llms.openai import OpenAI
logger = logging.getLogger(__name__)
class OpenAIFinetuneEngine(BaseLLMFinetuneEngine):
"""OpenAI Finetuning Engine."""
def __init__(
self,
base_model: str,
data_path: str,
verbose: bool = False,
start_job_id: Optional[str] = None,
validate_json: bool = True,
) -> None:
"""Init params."""
self.base_model = base_model
self.data_path = data_path
self._verbose = verbose
self._validate_json = validate_json
self._start_job: Optional[Any] = None
self._client = SyncOpenAI(api_key=os.getenv("OPENAI_API_KEY", None))
if start_job_id is not None:
self._start_job = self._client.fine_tuning.jobs.retrieve(start_job_id)
@classmethod
def from_finetuning_handler(
cls,
finetuning_handler: OpenAIFineTuningHandler,
base_model: str,
data_path: str,
**kwargs: Any,
) -> "OpenAIFinetuneEngine":
"""
Initialize from finetuning handler.
Used to finetune an OpenAI model into another
OpenAI model (e.g. gpt-3.5-turbo on top of GPT-4).
"""
finetuning_handler.save_finetuning_events(data_path)
return cls(base_model=base_model, data_path=data_path, **kwargs)
def finetune(self) -> None:
"""Finetune model."""
if self._validate_json:
validate_json(self.data_path)
# TODO: figure out how to specify file name in the new API
# file_name = os.path.basename(self.data_path)
# upload file
with open(self.data_path, "rb") as f:
output = self._client.files.create(file=f, purpose="fine-tune")
logger.info("File uploaded...")
if self._verbose:
print("File uploaded...")
# launch training
while True:
try:
job_output = self._client.fine_tuning.jobs.create(
training_file=output.id, model=self.base_model
)
self._start_job = job_output
break
except openai.BadRequestError:
print("Waiting for file to be ready...")
time.sleep(60)
info_str = (
f"Training job {output.id} launched. "
"You will be emailed when it's complete."
)
logger.info(info_str)
if self._verbose:
print(info_str)
def get_current_job(self) -> FineTuningJob:
"""Get current job."""
# validate that it works
if not self._start_job:
raise ValueError("Must call finetune() first")
# try getting id, make sure that run succeeded
job_id = self._start_job.id
return self._client.fine_tuning.jobs.retrieve(job_id)
def get_finetuned_model(self, **model_kwargs: Any) -> LLM:
"""Gets finetuned model."""
current_job = self.get_current_job()
job_id = current_job.id
status = current_job.status
model_id = current_job.fine_tuned_model
if model_id is None:
raise ValueError(
f"Job {job_id} does not have a finetuned model id ready yet."
)
if status != "succeeded":
raise ValueError(f"Job {job_id} has status {status}, cannot get model")
return OpenAI(model=model_id, **model_kwargs)
|
"""OpenAI Finetuning."""
import logging
import os
import time
from typing import Any, Optional
import openai
from openai import OpenAI as SyncOpenAI
from openai.types.fine_tuning import FineTuningJob
from llama_index.core.llms.llm import LLM
from llama_index.finetuning.callbacks.finetuning_handler import OpenAIFineTuningHandler
from llama_index.finetuning.openai.validate_json import validate_json
from llama_index.finetuning.types import BaseLLMFinetuneEngine
from llama_index.llms.openai import OpenAI
logger = logging.getLogger(__name__)
class OpenAIFinetuneEngine(BaseLLMFinetuneEngine):
"""OpenAI Finetuning Engine."""
def __init__(
self,
base_model: str,
data_path: str,
verbose: bool = False,
start_job_id: Optional[str] = None,
validate_json: bool = True,
) -> None:
"""Init params."""
self.base_model = base_model
self.data_path = data_path
self._verbose = verbose
self._validate_json = validate_json
self._start_job: Optional[Any] = None
self._client = SyncOpenAI(api_key=os.getenv("OPENAI_API_KEY", None))
if start_job_id is not None:
self._start_job = self._client.fine_tuning.jobs.retrieve(start_job_id)
@classmethod
def from_finetuning_handler(
cls,
finetuning_handler: OpenAIFineTuningHandler,
base_model: str,
data_path: str,
**kwargs: Any,
) -> "OpenAIFinetuneEngine":
"""Initialize from finetuning handler.
Used to finetune an OpenAI model into another
OpenAI model (e.g. gpt-3.5-turbo on top of GPT-4).
"""
finetuning_handler.save_finetuning_events(data_path)
return cls(base_model=base_model, data_path=data_path, **kwargs)
def finetune(self) -> None:
"""Finetune model."""
if self._validate_json:
validate_json(self.data_path)
# TODO: figure out how to specify file name in the new API
# file_name = os.path.basename(self.data_path)
# upload file
with open(self.data_path, "rb") as f:
output = self._client.files.create(file=f, purpose="fine-tune")
logger.info("File uploaded...")
if self._verbose:
print("File uploaded...")
# launch training
while True:
try:
job_output = self._client.fine_tuning.jobs.create(
training_file=output.id, model=self.base_model
)
self._start_job = job_output
break
except openai.BadRequestError:
print("Waiting for file to be ready...")
time.sleep(60)
info_str = (
f"Training job {output.id} launched. "
"You will be emailed when it's complete."
)
logger.info(info_str)
if self._verbose:
print(info_str)
def get_current_job(self) -> FineTuningJob:
"""Get current job."""
# validate that it works
if not self._start_job:
raise ValueError("Must call finetune() first")
# try getting id, make sure that run succeeded
job_id = self._start_job.id
return self._client.fine_tuning.jobs.retrieve(job_id)
def get_finetuned_model(self, **model_kwargs: Any) -> LLM:
"""Gets finetuned model."""
current_job = self.get_current_job()
job_id = current_job.id
status = current_job.status
model_id = current_job.fine_tuned_model
if model_id is None:
raise ValueError(
f"Job {job_id} does not have a finetuned model id ready yet."
)
if status != "succeeded":
raise ValueError(f"Job {job_id} has status {status}, cannot get model")
return OpenAI(model=model_id, **model_kwargs)
|
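A hedged end-to-end sketch of driving the engine above: `finetune()` uploads the JSONL file and launches the job, `get_current_job()` polls it, and `get_finetuned_model()` wraps the result once the job has succeeded. The import path, base model name, and file path are placeholders, and `OPENAI_API_KEY` must be set:

```python
import time

from llama_index.finetuning import OpenAIFinetuneEngine  # import path assumed

engine = OpenAIFinetuneEngine(
    base_model="gpt-3.5-turbo",           # placeholder base model
    data_path="finetuning_events.jsonl",  # placeholder training file
    verbose=True,
)
engine.finetune()  # uploads the file and creates the fine-tuning job

# Poll until the job finishes; get_finetuned_model() raises until it succeeds.
while engine.get_current_job().status not in ("succeeded", "failed", "cancelled"):
    time.sleep(60)

llm = engine.get_finetuned_model(temperature=0.0)
```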
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_decoder")
from .ctc_decoder import Hypothesis, CTCDecoder, ctc_decoder, lexicon_decoder, download_pretrained_files
except ImportError as err:
raise ImportError(
"flashlight decoder bindings are required to use this functionality. "
"Please set BUILD_CTC_DECODER=1 when building from source."
) from err
__all__ = [
"Hypothesis",
"CTCDecoder",
"ctc_decoder",
"lexicon_decoder",
"download_pretrained_files",
]
|
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_decoder")
from .ctc_decoder import Hypothesis, LexiconDecoder, lexicon_decoder, download_pretrained_files
except ImportError as err:
raise ImportError(
"flashlight decoder bindings are required to use this functionality. "
"Please set BUILD_CTC_DECODER=1 when building from source."
) from err
__all__ = [
"Hypothesis",
"LexiconDecoder",
"lexicon_decoder",
"download_pretrained_files",
]
|
from typing import Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
class GetThreadSchema(BaseModel):
"""Input for GetMessageTool."""
# From https://support.google.com/mail/answer/7190?hl=en
thread_id: str = Field(
...,
description="The thread ID.",
)
class GmailGetThread(GmailBaseTool):
"""Tool that gets a thread by ID from Gmail."""
name: str = "get_gmail_thread"
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Gmail query."
" The output is a JSON list of messages."
)
args_schema: Type[GetThreadSchema] = GetThreadSchema
def _run(
self,
thread_id: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Dict:
"""Run the tool."""
query = self.api_resource.users().threads().get(userId="me", id=thread_id)
thread_data = query.execute()
if not isinstance(thread_data, dict):
raise ValueError("The output of the query must be a dict.")
messages = thread_data["messages"]
thread_data["messages"] = []
keys_to_keep = ["id", "snippet"]
# TODO: Parse body.
for message in messages:
thread_data["messages"].append(
{k: message[k] for k in keys_to_keep if k in message}
)
return thread_data
|
from typing import Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
class GetThreadSchema(BaseModel):
"""Input for GetMessageTool."""
# From https://support.google.com/mail/answer/7190?hl=en
thread_id: str = Field(
...,
description="The thread ID.",
)
class GmailGetThread(GmailBaseTool): # type: ignore[override, override]
"""Tool that gets a thread by ID from Gmail."""
name: str = "get_gmail_thread"
description: str = (
"Use this tool to search for email messages."
" The input must be a valid Gmail query."
" The output is a JSON list of messages."
)
args_schema: Type[GetThreadSchema] = GetThreadSchema
def _run(
self,
thread_id: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Dict:
"""Run the tool."""
query = self.api_resource.users().threads().get(userId="me", id=thread_id)
thread_data = query.execute()
if not isinstance(thread_data, dict):
raise ValueError("The output of the query must be a dict.")
messages = thread_data["messages"]
thread_data["messages"] = []
keys_to_keep = ["id", "snippet"]
# TODO: Parse body.
for message in messages:
thread_data["messages"].append(
{k: message[k] for k in keys_to_keep if k in message}
)
return thread_data
|
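A hedged sketch of invoking the tool above, assuming an authenticated Gmail `api_resource` (a googleapiclient resource) has already been built, for instance via LangChain's Gmail credential helpers; the thread id is a placeholder:

```python
# `api_resource` is an authenticated googleapiclient Resource for the Gmail API
# (construction not shown here).
tool = GmailGetThread(api_resource=api_resource)

thread = tool.run({"thread_id": "YOUR_THREAD_ID"})  # placeholder id
for message in thread["messages"]:
    print(message.get("id"), message.get("snippet"))
```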
import os
import re
import subprocess
from keras.src import backend
# For torch, use index url to avoid installing nvidia drivers for the test.
BACKEND_REQ = {
"tensorflow": ("tensorflow-cpu", ""),
"torch": (
"torch",
"--extra-index-url https://download.pytorch.org/whl/cpu ",
),
"jax": ("jax[cpu]", ""),
"openvino": ("openvino", ""),
}
def setup_package():
subprocess.run("rm -rf tmp_build_dir", shell=True)
build_process = subprocess.run(
"python3 pip_build.py",
capture_output=True,
text=True,
shell=True,
)
print(build_process.stdout)
whl_path = re.findall(
r"[^\s]*\.whl",
build_process.stdout,
)
if not whl_path:
print(build_process.stdout)
print(build_process.stderr)
raise ValueError("Installing Keras package unsuccessful. ")
return whl_path[-1]
def create_virtualenv():
env_setup = [
# Create virtual environment
"python3 -m venv test_env",
]
os.environ["PATH"] = (
"/test_env/bin/" + os.pathsep + os.environ.get("PATH", "")
)
run_commands_local(env_setup)
def manage_venv_installs(whl_path):
other_backends = list(set(BACKEND_REQ.keys()) - {backend.backend()})
backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
install_setup = [
# Installs the backend's package and common requirements
"pip install " + backend_extra_url + backend_pkg,
"pip install -r requirements-common.txt",
"pip install pytest",
# Ensure other backends are uninstalled
"pip uninstall -y "
+ BACKEND_REQ[other_backends[0]][0]
+ " "
+ BACKEND_REQ[other_backends[1]][0]
+ " "
+ BACKEND_REQ[other_backends[2]][0],
# Install `.whl` package
"pip install " + whl_path,
]
run_commands_venv(install_setup)
def run_keras_flow():
test_script = [
# Runs the example script
"python -m pytest integration_tests/basic_full_flow.py",
]
run_commands_venv(test_script)
def cleanup():
cleanup_script = [
# Exits virtual environment, deletes files, and any
# miscellaneous install logs
"exit",
"rm -rf test_env",
"rm -rf tmp_build_dir",
"rm -f *+cpu",
]
run_commands_local(cleanup_script)
def run_commands_local(commands):
for command in commands:
print(f"Running command: {command}")
subprocess.run(command, shell=True)
def run_commands_venv(commands):
for command in commands:
print(f"Running command: {command}")
cmd_with_args = command.split(" ")
cmd_with_args[0] = "test_env/bin/" + cmd_with_args[0]
p = subprocess.Popen(cmd_with_args)
assert p.wait() == 0
def test_keras_imports():
try:
# Ensures packages from all backends are installed.
# Builds Keras core package and returns package file path.
whl_path = setup_package()
# Creates and activates a virtual environment.
create_virtualenv()
# Ensures the backend's package is installed
# and the other backends are uninstalled.
manage_venv_installs(whl_path)
# Runs test of basic flow in Keras Core.
# Tests for backend-specific imports and `model.fit()`.
run_keras_flow()
# Removes virtual environment and associated files
finally:
cleanup()
if __name__ == "__main__":
test_keras_imports()
|
import os
import re
import subprocess
from keras.src import backend
# For torch, use index url to avoid installing nvidia drivers for the test.
BACKEND_REQ = {
"tensorflow": ("tensorflow-cpu", ""),
"torch": (
"torch",
"--extra-index-url https://download.pytorch.org/whl/cpu ",
),
"jax": ("jax[cpu]", ""),
"openvino": ("openvino", ""),
}
def setup_package():
subprocess.run("rm -rf tmp_build_dir", shell=True)
build_process = subprocess.run(
"python3 pip_build.py",
capture_output=True,
text=True,
shell=True,
)
print(build_process.stdout)
whl_path = re.findall(
r"[^\s]*\.whl",
build_process.stdout,
)[-1]
if not whl_path:
print(build_process.stderr)
raise ValueError("Installing Keras package unsuccessful. ")
return whl_path
def create_virtualenv():
env_setup = [
# Create virtual environment
"python3 -m venv test_env",
]
os.environ["PATH"] = (
"/test_env/bin/" + os.pathsep + os.environ.get("PATH", "")
)
run_commands_local(env_setup)
def manage_venv_installs(whl_path):
other_backends = list(set(BACKEND_REQ.keys()) - {backend.backend()})
backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
install_setup = [
# Installs the backend's package and common requirements
"pip install " + backend_extra_url + backend_pkg,
"pip install -r requirements-common.txt",
"pip install pytest",
# Ensure other backends are uninstalled
"pip uninstall -y "
+ BACKEND_REQ[other_backends[0]][0]
+ " "
+ BACKEND_REQ[other_backends[1]][0]
+ " "
+ BACKEND_REQ[other_backends[2]][0],
# Install `.whl` package
"pip install " + whl_path,
]
run_commands_venv(install_setup)
def run_keras_flow():
test_script = [
# Runs the example script
"python -m pytest integration_tests/basic_full_flow.py",
]
run_commands_venv(test_script)
def cleanup():
cleanup_script = [
# Exits virtual environment, deletes files, and any
# miscellaneous install logs
"exit",
"rm -rf test_env",
"rm -rf tmp_build_dir",
"rm -f *+cpu",
]
run_commands_local(cleanup_script)
def run_commands_local(commands):
for command in commands:
print(f"Running command: {command}")
subprocess.run(command, shell=True)
def run_commands_venv(commands):
for command in commands:
print(f"Running command: {command}")
cmd_with_args = command.split(" ")
cmd_with_args[0] = "test_env/bin/" + cmd_with_args[0]
p = subprocess.Popen(cmd_with_args)
assert p.wait() == 0
def test_keras_imports():
try:
# Ensures packages from all backends are installed.
# Builds Keras core package and returns package file path.
whl_path = setup_package()
# Creates and activates a virtual environment.
create_virtualenv()
# Ensures the backend's package is installed
# and the other backends are uninstalled.
manage_venv_installs(whl_path)
# Runs test of basic flow in Keras Core.
# Tests for backend-specific imports and `model.fit()`.
run_keras_flow()
# Removes virtual environment and associated files
finally:
cleanup()
if __name__ == "__main__":
test_keras_imports()
|
"""Test embedding utility functions."""
import numpy as np
from llama_index.core.indices.query.embedding_utils import (
get_top_k_embeddings,
get_top_k_mmr_embeddings,
)
def test_get_top_k_mmr_embeddings() -> None:
"""Test Maximum Marginal Relevance."""
# Results score should follow from the mmr algorithm
query_embedding = [5.0, 0.0, 0.0]
embeddings = [[4.0, 3.0, 0.0], [3.0, 4.0, 0.0], [-4.0, 3.0, 0.0]]
result_similarities, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=0.8
)
assert np.isclose(0.8 * 4 / 5, result_similarities[0], atol=0.00001)
assert np.isclose(
0.8 * 3 / 5 - (1 - 0.8) * (3 * 4 / 25 + 3 * 4 / 25),
result_similarities[1],
atol=0.00001,
)
assert np.isclose(
0.8 * -4 / 5 - (1 - 0.8) * (3 * -4 / 25 + 4 * 3 / 25),
result_similarities[2],
atol=0.00001,
)
assert result_ids == [0, 1, 2]
# Tests that if the first embedding vector is close to the second,
# it will return the third
query_embedding = [1.0, 0.0, 1.0]
embeddings = [[1.0, 0.0, 0.9], [1.0, 0.0, 0.8], [0.7, 0.0, 1.0]]
_, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=0.5
)
assert result_ids == [0, 2, 1]
# Tests that embedding ids map properly to results
_, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, embedding_ids=["A", "B", "C"], mmr_threshold=0.5
)
assert result_ids == ["A", "C", "B"]
# Test that it will go back to the original order under a high threshold
_, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=1
)
assert result_ids == [0, 1, 2]
# Test similarity_top_k works
_, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=1, similarity_top_k=2
)
assert result_ids == [0, 1]
# Test the results for get_top_k_embeddings and get_top_k_mmr_embeddings are the
# same for threshold = 1
query_embedding = [10, 23, 90, 78]
embeddings = [[1, 23, 89, 68], [1, 74, 144, 23], [0.23, 0.0, 1.0, 9]]
result_similarities_no_mmr, result_ids_no_mmr = get_top_k_embeddings(
query_embedding, embeddings
)
result_similarities, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=1
)
for result_no_mmr, result_with_mmr in zip(
result_similarities_no_mmr, result_similarities
):
assert np.isclose(result_no_mmr, result_with_mmr, atol=0.00001)
|
""" Test embedding utility functions."""
import numpy as np
from llama_index.core.indices.query.embedding_utils import (
get_top_k_embeddings,
get_top_k_mmr_embeddings,
)
def test_get_top_k_mmr_embeddings() -> None:
"""Test Maximum Marginal Relevance."""
# Results score should follow from the mmr algorithm
query_embedding = [5.0, 0.0, 0.0]
embeddings = [[4.0, 3.0, 0.0], [3.0, 4.0, 0.0], [-4.0, 3.0, 0.0]]
result_similarities, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=0.8
)
assert np.isclose(0.8 * 4 / 5, result_similarities[0], atol=0.00001)
assert np.isclose(
0.8 * 3 / 5 - (1 - 0.8) * (3 * 4 / 25 + 3 * 4 / 25),
result_similarities[1],
atol=0.00001,
)
assert np.isclose(
0.8 * -4 / 5 - (1 - 0.8) * (3 * -4 / 25 + 4 * 3 / 25),
result_similarities[2],
atol=0.00001,
)
assert result_ids == [0, 1, 2]
# Tests that if the first embedding vector is close to the second,
# it will return the third
query_embedding = [1.0, 0.0, 1.0]
embeddings = [[1.0, 0.0, 0.9], [1.0, 0.0, 0.8], [0.7, 0.0, 1.0]]
_, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=0.5
)
assert result_ids == [0, 2, 1]
# Tests that embedding ids map properly to results
_, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, embedding_ids=["A", "B", "C"], mmr_threshold=0.5
)
assert result_ids == ["A", "C", "B"]
# Test that it will go back to the original order under a high threshold
_, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=1
)
assert result_ids == [0, 1, 2]
# Test similarity_top_k works
_, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=1, similarity_top_k=2
)
assert result_ids == [0, 1]
# Test the results for get_top_k_embeddings and get_top_k_mmr_embeddings are the
# same for threshold = 1
query_embedding = [10, 23, 90, 78]
embeddings = [[1, 23, 89, 68], [1, 74, 144, 23], [0.23, 0.0, 1.0, 9]]
result_similarities_no_mmr, result_ids_no_mmr = get_top_k_embeddings(
query_embedding, embeddings
)
result_similarities, result_ids = get_top_k_mmr_embeddings(
query_embedding, embeddings, mmr_threshold=1
)
for result_no_mmr, result_with_mmr in zip(
result_similarities_no_mmr, result_similarities
):
assert np.isclose(result_no_mmr, result_with_mmr, atol=0.00001)
|
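The first block of assertions encodes the MMR recurrence directly: with threshold `t`, a candidate scores `t * cos(query, d) - (1 - t) * max(cos(d, s) for s in already_selected)`. A small sketch that reproduces the first test's expected ordering and scores by hand; it mirrors what the assertions check, though the library's internal implementation may differ in details:

```python
import numpy as np

query = np.array([5.0, 0.0, 0.0])
embeddings = np.array([[4.0, 3.0, 0.0], [3.0, 4.0, 0.0], [-4.0, 3.0, 0.0]])
t = 0.8  # mmr_threshold used in the test


def cos(a, b):
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))


selected, scores = [], []
remaining = list(range(len(embeddings)))
while remaining:
    best_idx, best_score = None, -np.inf
    for i in remaining:
        relevance = t * cos(query, embeddings[i])
        redundancy = max((cos(embeddings[i], embeddings[j]) for j in selected), default=0.0)
        score = relevance - (1 - t) * redundancy
        if score > best_score:
            best_idx, best_score = i, score
    selected.append(best_idx)
    remaining.remove(best_idx)
    scores.append(best_score)

print(selected)  # [0, 1, 2], matching result_ids in the test
print(scores)    # approx [0.64, 0.288, -0.64], matching the asserted similarities
```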
from markitdown import MarkItDown
from llama_index.core.bridge.pydantic import BaseModel, model_validator
import os
from pathlib import Path
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from typing import Tuple, Optional, Union, List
from typing_extensions import Self
def is_empty(list_it: list) -> Tuple[bool, Optional[list]]:
if len(list_it) == 0:
return True, None
return False, list_it
class ValidFilePath(BaseModel):
file_path: Union[str, Path, List[str], List[Path]]
@model_validator(mode="after")
def validate_file_path(self) -> Self:
if isinstance(self.file_path, str):
if not Path(self.file_path).is_dir():
if not Path(self.file_path).is_file():
raise ValueError("File or directory path does not exist")
else:
dir_files = [self.file_path]
else:
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
elif isinstance(self.file_path, Path):
if not self.file_path.is_dir():
if not self.file_path.is_file():
raise ValueError("File or directory path does not exist")
else:
dir_files = [self.file_path]
else:
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
empty, fls = is_empty(self.file_path)
if empty:
raise ValueError("There is no file to parse!")
else:
files = []
if isinstance(fls[0], str):
for fl in fls:
if Path(fl).is_file() and os.path.splitext(fl)[1] in [
".docx",
".html",
".xml",
".csv",
".pdf",
".pptx",
".xlsx",
".json",
".zip",
".txt",
"",
".md",
]:
files.append(fl)
else:
for fl in fls:
if fl.is_file() and os.path.splitext(fl)[1] in [
".docx",
".html",
".xml",
".csv",
".pdf",
".pptx",
".xlsx",
".json",
".zip",
".txt",
"",
".md",
]:
files.append(fl.__str__())
self.file_path = files
return self
class MarkItDownReader(BaseReader):
"""
MarkItDownReader is a document reader that utilizes the MarkItDown parser to convert files or collections of files into Document objects.
Methods
-------
load_data(file_path: str | Path | Iterable[str] | Iterable[Path]) -> List[Document]
Loads and parses a directory (if `file_path` is `str` or `Path`) or a list of files specified by `file_path` using the MarkItDown parser.
Returns a list of Document objects, each containing the text content and metadata such as file path, file type, and content length.
"""
_reader: MarkItDown = MarkItDown()
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "MarkItDownReader"
def load_data(
self,
file_path: Union[str, Path, List[str], List[Path]],
**kwargs,
) -> List[Document]:
docs: List[Document] = []
fl_pt = ValidFilePath(file_path=file_path)
fs = fl_pt.file_path
for f in fs:
res = self._reader.convert(f)
docs.append(
Document(
text=res.text_content,
metadata={
"file_path": f.__str__(),
"file_type": os.path.splitext(f)[1],
"content_length": len(res.text_content),
},
)
)
return docs
|
from markitdown import MarkItDown
from llama_index.core.bridge.pydantic import BaseModel, model_validator
import os
from pathlib import Path
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from typing import Tuple, Optional, Union, List
from typing_extensions import Self
def is_empty(list_it: list) -> Tuple[bool, Optional[list]]:
if len(list_it) == 0:
return True, None
return False, list_it
class ValidFilePath(BaseModel):
file_path: Union[str, Path, List[str], List[Path]]
@model_validator(mode="after")
def validate_file_path(self) -> Self:
if isinstance(self.file_path, str):
if not Path(self.file_path).is_dir():
if not Path(self.file_path).is_file():
raise ValueError("File or directory path does not exist")
else:
dir_files = [self.file_path]
else:
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
elif isinstance(self.file_path, Path):
if not self.file_path.is_dir():
if not self.file_path.is_file():
raise ValueError("File or directory path does not exist")
else:
dir_files = [self.file_path]
else:
dir_files = []
for root, _, files in os.walk(self.file_path):
for el in files:
dir_files.append(os.path.join(root, el))
self.file_path = dir_files
empty, fls = is_empty(self.file_path)
if empty:
raise ValueError("There is no file to parse!")
else:
files = []
if isinstance(fls[0], str):
for fl in fls:
if Path(fl).is_file() and os.path.splitext(fl)[1] in [".docx", ".html", ".xml", ".csv", ".pdf", ".pptx", ".xlsx", ".json", ".zip", ".txt", "", ".md"]:
files.append(fl)
else:
for fl in fls:
if fl.is_file() and os.path.splitext(fl)[1] in [".docx", ".html", ".xml", ".csv", ".pdf", ".pptx", ".xlsx", ".json", ".zip", ".txt", "", ".md"]:
files.append(fl.__str__())
self.file_path = files
return self
class MarkItDownReader(BaseReader):
"""
MarkItDownReader is a document reader that utilizes the MarkItDown parser to convert files or collections of files into Document objects.
Methods
-------
load_data(file_path: str | Path | Iterable[str] | Iterable[Path]) -> List[Document]
Loads and parses a directory (if `file_path` is `str` or `Path`) or a list of files specified by `file_path` using the MarkItDown parser.
Returns a list of Document objects, each containing the text content and metadata such as file path, file type, and content length.
"""
_reader: MarkItDown = MarkItDown()
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "MarkItDownReader"
def load_data(
self,
file_path: Union[str, Path, List[str], List[Path]],
**kwargs,
) -> List[Document]:
docs: List[Document] = []
fl_pt = ValidFilePath(file_path=file_path)
fs = fl_pt.file_path
for f in fs:
res = self._reader.convert(f)
docs.append(Document(text=res.text_content, metadata={"file_path": f.__str__(), "file_type": os.path.splitext(f)[1], "content_length": len(res.text_content)}))
return docs
|
from __future__ import annotations
from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseGISTEmbedLoss(GISTEmbedLoss):
def __init__(
self,
model: SparseEncoder,
guide: SparseEncoder,
temperature: float = 0.1,
) -> None:
"""
This loss is used to train a SparseEncoder model using the GISTEmbed algorithm.
It takes a model and a guide model as input, and uses the guide model to guide the
in-batch negative sample selection. The cosine similarity is used to compute the loss
and the temperature parameter is used to scale the cosine similarities.
You can apply different false-negative filtering strategies to discard hard negatives that are too similar to
the positive. Two strategies are supported:
- "absolute": Discards negatives whose similarity score is greater than or equal to ``positive_score - margin``.
- "relative": Discards negatives whose similarity score is greater than or equal to ``positive_score * (1 - margin)``.
Args:
model: SparseEncoder model based on a `transformers` model.
guide: SparseEncoder model to guide the in-batch negative sample selection.
            temperature: Temperature parameter to scale the cosine similarities. The default of 0.1 is
                adapted for sparse embeddings and might need further tuning.
margin_strategy: Strategy used for false negative filtering. One of {"absolute", "relative"}.
margin: The margin value for filtering negatives. Defaults to 0.0, together with the "absolute" strategy,
this only removes negatives that are more similar to the query than the positive is to the query.
References:
- For further details, see: https://arxiv.org/abs/2402.16829
Requirements:
1. (anchor, positive, negative) triplets
2. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseMultipleNegativesRankingLoss` is similar to this loss, but it does not use
a guide model to guide the in-batch negative sample selection. `SparseGISTEmbedLoss` yields
a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
# Initialize the SPLADE model
model = SparseEncoder("distilbert/distilbert-base-uncased")
guide = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SparseGISTEmbedLoss(model, guide=guide)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, guide=guide, temperature=temperature)
|
from __future__ import annotations
from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseGISTEmbedLoss(GISTEmbedLoss):
def __init__(
self,
model: SparseEncoder,
guide: SparseEncoder,
temperature: float = 0.01,
) -> None:
"""
This loss is used to train a SparseEncoder model using the GISTEmbed algorithm.
It takes a model and a guide model as input, and uses the guide model to guide the
in-batch negative sample selection. The cosine similarity is used to compute the loss
and the temperature parameter is used to scale the cosine similarities.
You can apply different false-negative filtering strategies to discard hard negatives that are too similar to
the positive. Two strategies are supported:
- "absolute": Discards negatives whose similarity score is greater than or equal to ``positive_score - margin``.
- "relative": Discards negatives whose similarity score is greater than or equal to ``positive_score * (1 - margin)``.
Args:
model: SparseEncoder model based on a `transformers` model.
guide: SparseEncoder model to guide the in-batch negative sample selection.
temperature: Temperature parameter to scale the cosine similarities.
margin_strategy: Strategy used for false negative filtering. One of {"absolute", "relative"}.
margin: The margin value for filtering negatives. Defaults to 0.0, together with the "absolute" strategy,
this only removes negatives that are more similar to the query than the positive is to the query.
References:
- For further details, see: https://arxiv.org/abs/2402.16829
Requirements:
1. (anchor, positive, negative) triplets
2. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseMultipleNegativesRankingLoss` is similar to this loss, but it does not use
a guide model to guide the in-batch negative sample selection. `SparseGISTEmbedLoss` yields
a stronger training signal at the cost of some training overhead.
Example:
::
# TODO: Add example but need to be sure it works first
"""
return super().__init__(model, guide=guide, temperature=temperature)
|
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
    :param parser: the parser to configure
"""
from jina.enums import FlowInspectType
gp = add_arg_group(parser, title='Flow Feature')
gp.add_argument(
'--uses',
type=str,
help='The YAML path represents a flow. It can be either a local file path or a URL.',
)
gp.add_argument(
'--reload',
action='store_true',
default=False,
        help='If set, auto-reloading on file changes is enabled: the Flow will restart while blocked if the YAML '
        'configuration source is changed. This also applies to underlying Executors, if their source '
        'code or YAML configuration has changed.',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
gp.add_argument(
'--inspect',
type=FlowInspectType.from_string,
choices=list(FlowInspectType),
default=FlowInspectType.COLLECT,
help='''
    The strategy to apply to the inspect deployments in the flow.
If `REMOVE` is given then all inspect deployments are removed when building the flow.
''',
)
def set_flow_parser(parser=None):
"""Set the parser for the flow
:param parser: an (optional) initial parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_essential_parser(parser)
mixin_flow_features_parser(parser)
return parser
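# Hedged usage sketch (not part of the module above); "flow.yml" is a
# hypothetical placeholder path.
#
#     parser = set_flow_parser()
#     args = parser.parse_args(['--uses', 'flow.yml', '--reload'])
#     print(args.uses, args.reload)  # -> flow.yml True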
|
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
    :param parser: the parser to configure
"""
from jina.enums import FlowInspectType
gp = add_arg_group(parser, title='Flow Feature')
gp.add_argument(
'--uses',
type=str,
help='The YAML path represents a flow. It can be either a local file path or a URL.',
)
gp.add_argument(
'--restart',
action='store_true',
default=False,
help='If set, the Flow will restart while blocked if the YAML configuration source is changed.'
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
gp.add_argument(
'--inspect',
type=FlowInspectType.from_string,
choices=list(FlowInspectType),
default=FlowInspectType.COLLECT,
help='''
    The strategy to apply to the inspect deployments in the flow.
If `REMOVE` is given then all inspect deployments are removed when building the flow.
''',
)
def set_flow_parser(parser=None):
"""Set the parser for the flow
:param parser: an (optional) initial parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_essential_parser(parser)
mixin_flow_features_parser(parser)
return parser
|
from .faiss_lmdb import FaissLMDBSearcher
|
from .faiss_lmdb import FaissLMDBSearcher
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain:
- a [`TextUrl`][docarray.typing.url.TextUrl] (`TextDoc.url`)
- a `str` (`TextDoc.text`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`TextDoc.embedding`)
- a `bytes` object (`TextDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import TextDoc
# use it directly
txt_doc = TextDoc(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
```
You can initialize directly from a string:
```python
from docarray.documents import TextDoc
txt_doc = TextDoc('hello world')
```
You can extend this Document:
```python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(TextDoc):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
# model = MyEmbeddingModel()
# txt_doc.embedding = model(txt_doc.text)
# txt_doc.second_embedding = model(txt_doc.text)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image_doc: ImageDoc
text_doc: TextDoc
mmdoc = MultiModalDoc(
image_doc=ImageDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/image-data/apple.png?raw=true'
),
text_doc=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image_doc.tensor = mmdoc.image_doc.url.load()
# or
mmdoc.image_doc.bytes_ = mmdoc.image_doc.url.load_bytes()
mmdoc.image_doc.tensor = mmdoc.image_doc.bytes_.load()
```
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
excluding `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
```python
from docarray.documents import TextDoc
doc = TextDoc(text='This is the main text', url='exampleurl.com')
doc2 = TextDoc(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # True
```
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `TextDoc` behave the same as an `str`.
        :param item: A string to check for as a substring of the `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
```python
from docarray.documents import TextDoc
t = TextDoc(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
```
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
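# Hedged usage sketch of the comparison semantics documented above:
#
#     doc = TextDoc(text='hello world')
#     assert doc == 'hello world'  # __eq__ compares against `text` for strings
#     assert 'hello' in doc        # __contains__ delegates to `text`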
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='TextDoc')
class TextDoc(BaseDoc):
"""
Document for handling text.
It can contain a TextUrl (`TextDoc.url`), a str (`TextDoc.text`),
and an AnyEmbedding (`TextDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import TextDoc
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import TextDoc
txt_doc = Text('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import TextDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
# or
mmdoc.text_doc.bytes_ = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents import TextDoc
doc = Text(text='This is the main text', url='exampleurl.com')
doc2 = Text(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str]
url: Optional[TextUrl]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `Text` behave the same as an `str`.
.. code-block:: python
from docarray.documents import Text
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
        :param item: A string to check for as a substring of the `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from os.path import join
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts them in the root directory. If the zip files are already downloaded, they are not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
folder = "omniglot-py"
download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
zips_md5 = {
"images_background": "68d2efa1b9178cc56df9314c21c6e718",
"images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
}
def __init__(
self,
root: Union[str, Path],
background: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Optional[Callable[[Union[str, Path]], Any]] = None,
) -> None:
super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
self.background = background
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters: List[str] = sum(
([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
)
self._character_images = [
[(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
for idx, character in enumerate(self._characters)
]
self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])
self.loader = loader
def __len__(self) -> int:
return len(self._flat_character_images)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode="r").convert("L") if self.loader is None else self.loader(image_path)
if self.transform:
image = self.transform(image)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, character_class
def _check_integrity(self) -> bool:
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
filename = self._get_target_folder()
zip_filename = filename + ".zip"
url = self.download_url_prefix + "/" + zip_filename
download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
def _get_target_folder(self) -> str:
return "images_background" if self.background else "images_evaluation"
|
from os.path import join
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts them in the root directory. If the zip files are already downloaded, they are not
downloaded again.
"""
folder = "omniglot-py"
download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
zips_md5 = {
"images_background": "68d2efa1b9178cc56df9314c21c6e718",
"images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
}
def __init__(
self,
root: Union[str, Path],
background: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
self.background = background
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters: List[str] = sum(
([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
)
self._character_images = [
[(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
for idx, character in enumerate(self._characters)
]
self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])
def __len__(self) -> int:
return len(self._flat_character_images)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode="r").convert("L")
if self.transform:
image = self.transform(image)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, character_class
def _check_integrity(self) -> bool:
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
filename = self._get_target_folder()
zip_filename = filename + ".zip"
url = self.download_url_prefix + "/" + zip_filename
download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
def _get_target_folder(self) -> str:
return "images_background" if self.background else "images_evaluation"
|
import types
from typing import TYPE_CHECKING
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.epsilla import EpsillaDocumentIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.milvus import MilvusDocumentIndex # noqa: F401
from docarray.index.backends.mongodb_atlas import ( # noqa: F401
MongoDBAtlasDocumentIndex,
)
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.redis import RedisDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
__all__ = [
'InMemoryExactNNIndex',
'ElasticDocIndex',
'ElasticV7DocIndex',
'EpsillaDocumentIndex',
'QdrantDocumentIndex',
'WeaviateDocumentIndex',
'RedisDocumentIndex',
'MilvusDocumentIndex',
'MongoDBAtlasDocumentIndex',
]
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'EpsillaDocumentIndex':
import_library('pyepsilla', raise_error=True)
import docarray.index.backends.epsilla as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
elif name == 'MilvusDocumentIndex':
import_library('pymilvus', raise_error=True)
import docarray.index.backends.milvus as lib
elif name == 'RedisDocumentIndex':
import_library('redis', raise_error=True)
import docarray.index.backends.redis as lib
elif name == 'MongoDBAtlasDocumentIndex':
import_library('pymongo', raise_error=True)
import docarray.index.backends.mongodb_atlas as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
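# Hedged usage sketch of the lazy import above (assumes the optional
# `hnswlib` dependency is installed):
#
#     from docarray.index import HnswDocumentIndex  # resolved via __getattr__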
|
import types
from typing import TYPE_CHECKING
from docarray.index.backends.in_memory import InMemoryExactNNIndex
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.epsilla import EpsillaDocumentIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
from docarray.index.backends.milvus import MilvusDocumentIndex # noqa: F401
from docarray.index.backends.qdrant import QdrantDocumentIndex # noqa: F401
from docarray.index.backends.redis import RedisDocumentIndex # noqa: F401
from docarray.index.backends.weaviate import WeaviateDocumentIndex # noqa: F401
__all__ = [
'InMemoryExactNNIndex',
'ElasticDocIndex',
'ElasticV7DocIndex',
'EpsillaDocumentIndex',
'QdrantDocumentIndex',
'WeaviateDocumentIndex',
'RedisDocumentIndex',
'MilvusDocumentIndex',
]
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
elif name == 'EpsillaDocumentIndex':
import_library('pyepsilla', raise_error=True)
import docarray.index.backends.epsilla as lib
elif name == 'QdrantDocumentIndex':
import_library('qdrant_client', raise_error=True)
import docarray.index.backends.qdrant as lib
elif name == 'WeaviateDocumentIndex':
import_library('weaviate', raise_error=True)
import docarray.index.backends.weaviate as lib
elif name == 'MilvusDocumentIndex':
import_library('pymilvus', raise_error=True)
import docarray.index.backends.milvus as lib
elif name == 'RedisDocumentIndex':
import_library('redis', raise_error=True)
import docarray.index.backends.redis as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
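# Hedged usage sketch: a test that depends on the image built by the fixture
# above (assumes a local Docker daemon is available).
#
#     def test_image_builds(build_docker_image):
#         assert build_docker_image  # image tag equals the project directory name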
|
import random
import pytest
from jina import Document, DocumentArray
@pytest.fixture
def documents_chunk():
document_array = DocumentArray()
document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1})
for i in range(0, 10):
chunk = Document()
for j in range(0, 10):
match = Document(
tags={
'level': 'chunk',
}
)
match.scores['cosine'] = random.random()
match.parent_id = i
chunk.matches.append(match)
document.chunks.append(chunk)
document_array.extend([document])
return document_array
@pytest.fixture
def documents_chunk_chunk():
document_array = DocumentArray()
document = Document(tags={'query_size': 35, 'query_price': 31, 'query_brand': 1})
for i in range(0, 10):
chunk = Document()
for j in range(0, 10):
chunk_chunk = Document()
for k in range(0, 10):
match = Document(
tags={
'level': 'chunk',
}
)
match.scores['cosine'] = random.random()
match.parent_id = j
chunk_chunk.matches.append(match)
chunk.chunks.append(chunk_chunk)
document.chunks.append(chunk)
document_array.extend([document])
return document_array
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'target',
type=str,
choices=['flow', 'executor'],
help='The target type to ping',
default='executor',
)
parser.add_argument(
'host',
type=str,
help='The host address with port of a target Executor or a Flow, e.g. 0.0.0.0:8000',
)
parser.add_argument(
'--timeout',
type=int,
default=3000,
help='''
Timeout in milliseconds for one check
-1 for waiting forever
''',
)
parser.add_argument(
'--retries',
type=int,
default=3,
        help='The max number of health checks to try before exiting with exit code 1',
)
return parser
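# Hedged usage sketch; '0.0.0.0:8000' is a placeholder address.
#
#     args = set_ping_parser().parse_args(['executor', '0.0.0.0:8000'])
#     print(args.target, args.host, args.timeout)  # -> executor 0.0.0.0:8000 3000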
|
"""Argparser module for pinging"""
from jina.parsers.base import set_base_parser
def set_ping_parser(parser=None):
"""Set the parser for `ping`
:param parser: an existing parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
parser.add_argument(
'host', type=str, help='The host address of the target Pod, e.g. 0.0.0.0'
)
parser.add_argument(
'port', type=int, help='The control port of the target deployment/pod'
)
parser.add_argument(
'--timeout',
type=int,
default=3000,
help='''
Timeout in milliseconds for one check
-1 for waiting forever
''',
)
parser.add_argument(
'--retries',
type=int,
default=3,
        help='The max number of health checks to try before exiting with exit code 1',
)
return parser
|
_base_ = '../fast_rcnn/fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img', 'proposals']),
])
]
# TODO: support loading proposals
data = dict(
train=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=None),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img', 'proposals']),
])
]
# TODO: support loading proposals
data = dict(
train=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root + 'proposals/ga_rpn_r50_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
import asyncio
from typing import AsyncIterator, Iterator, Optional, Union
from jina.helper import get_or_reuse_loop
class _RequestsCounter:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
def count_increment(i: int, rc: _RequestsCounter):
i += 1
rc.count += 1
c_int = 0
c_rc = _RequestsCounter()
count_increment(c_int, c_rc)
assert c_int == 0
assert c_rc.count == 1
"""
count = 0
class AsyncRequestsIterator:
"""Iterator to allow async iteration of blocking/non-blocking iterator from the Client"""
def __init__(
self,
iterator: Union[Iterator, AsyncIterator],
request_counter: Optional[_RequestsCounter] = None,
prefetch: int = 0,
iterate_sync_in_thread: bool = True,
) -> None:
"""Async request iterator
:param iterator: request iterator
        :param request_counter: counter of the number of requests being handled at a given moment
:param prefetch: The max amount of requests to be handled at a given moment (0 disables feature)
:param iterate_sync_in_thread: if True, blocking iterators will call __next__ in a Thread.
"""
self.iterator = iterator
self._request_counter = request_counter
self._prefetch = prefetch
self._iterate_sync_in_thread = iterate_sync_in_thread
def iterator__next__(self):
"""
        Executed inside a `ThreadPoolExecutor` via `loop.run_in_executor` to avoid the following exception.
"StopIteration interacts badly with generators and cannot be raised into a Future"
:return: next request or None
"""
try:
return self.iterator.__next__()
except StopIteration:
return None
def __aiter__(self):
return self
async def __anext__(self):
if isinstance(self.iterator, Iterator):
"""
An `Iterator` indicates "blocking" code, which might block all tasks in the event loop.
Hence we iterate in the default executor provided by asyncio.
"""
if not self._iterate_sync_in_thread:
async def _get_next():
try:
req = self.iterator.__next__()
except StopIteration:
req = None
return req
request = await asyncio.create_task(_get_next())
else:
request = await get_or_reuse_loop().run_in_executor(
None, self.iterator__next__
)
"""
`iterator.__next__` can be executed directly and that'd raise `StopIteration` in the executor,
which raises the following exception while chaining states in futures.
"StopIteration interacts badly with generators and cannot be raised into a Future"
To avoid that, we handle the raise by a `return None`
"""
if request is None:
raise StopAsyncIteration
elif isinstance(self.iterator, AsyncIterator):
# we assume that `AsyncIterator` doesn't block the event loop
request = await self.iterator.__anext__()
if self._prefetch > 0:
while self._request_counter.count >= self._prefetch:
await asyncio.sleep(0)
return request
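# Hedged usage sketch of wrapping a plain blocking iterator (to be run from
# within an asyncio event loop):
#
#     async def consume():
#         async for request in AsyncRequestsIterator(iter(range(3))):
#             print(request)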
|
import asyncio
from typing import AsyncIterator, Iterator, Optional, Union
from jina.helper import get_or_reuse_loop
class _RequestsCounter:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
def count_increment(i: int, rc: _RequestsCounter):
i += 1
rc.count += 1
c_int = 0
c_rc = _RequestsCounter()
count_increment(c_int, c_rc)
assert c_int == 0
assert c_rc.count == 1
"""
count = 0
class AsyncRequestsIterator:
"""Iterator to allow async iteration of blocking/non-blocking iterator from the Client"""
def __init__(
self,
iterator: Union[Iterator, AsyncIterator],
request_counter: Optional[_RequestsCounter] = None,
prefetch: int = 0,
) -> None:
"""Async request iterator
:param iterator: request iterator
        :param request_counter: counter of the number of requests being handled at a given moment
:param prefetch: The max amount of requests to be handled at a given moment (0 disables feature)
"""
self.iterator = iterator
self._request_counter = request_counter
self._prefetch = prefetch
def iterator__next__(self):
"""
        Executed inside a `ThreadPoolExecutor` via `loop.run_in_executor` to avoid the following exception.
"StopIteration interacts badly with generators and cannot be raised into a Future"
:return: next request or None
"""
try:
return self.iterator.__next__()
except StopIteration:
return None
def __aiter__(self):
return self
async def __anext__(self):
if isinstance(self.iterator, Iterator):
"""
An `Iterator` indicates "blocking" code, which might block all tasks in the event loop.
Hence we iterate in the default executor provided by asyncio.
"""
request = await get_or_reuse_loop().run_in_executor(
None, self.iterator__next__
)
"""
`iterator.__next__` can be executed directly and that'd raise `StopIteration` in the executor,
which raises the following exception while chaining states in futures.
"StopIteration interacts badly with generators and cannot be raised into a Future"
To avoid that, we handle the raise by a `return None`
"""
if request is None:
raise StopAsyncIteration
elif isinstance(self.iterator, AsyncIterator):
# we assume that `AsyncIterator` doesn't block the event loop
request = await self.iterator.__anext__()
if self._prefetch > 0:
while self._request_counter.count >= self._prefetch:
await asyncio.sleep(0)
return request
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.applications import convnext as convnext
from keras.applications import densenet as densenet
from keras.applications import efficientnet as efficientnet
from keras.applications import efficientnet_v2 as efficientnet_v2
from keras.applications import imagenet_utils as imagenet_utils
from keras.applications import inception_resnet_v2 as inception_resnet_v2
from keras.applications import inception_v3 as inception_v3
from keras.applications import mobilenet as mobilenet
from keras.applications import mobilenet_v2 as mobilenet_v2
from keras.applications import mobilenet_v3 as mobilenet_v3
from keras.applications import nasnet as nasnet
from keras.applications import resnet as resnet
from keras.applications import resnet50 as resnet50
from keras.applications import resnet_v2 as resnet_v2
from keras.applications import vgg16 as vgg16
from keras.applications import vgg19 as vgg19
from keras.applications import xception as xception
from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 as DenseNet201
from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0
from keras.src.applications.efficientnet import EfficientNetB1 as EfficientNetB1
from keras.src.applications.efficientnet import EfficientNetB2 as EfficientNetB2
from keras.src.applications.efficientnet import EfficientNetB3 as EfficientNetB3
from keras.src.applications.efficientnet import EfficientNetB4 as EfficientNetB4
from keras.src.applications.efficientnet import EfficientNetB5 as EfficientNetB5
from keras.src.applications.efficientnet import EfficientNetB6 as EfficientNetB6
from keras.src.applications.efficientnet import EfficientNetB7 as EfficientNetB7
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B0 as EfficientNetV2B0,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B1 as EfficientNetV2B1,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B2 as EfficientNetV2B2,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B3 as EfficientNetV2B3,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2L as EfficientNetV2L,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2M as EfficientNetV2M,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2S as EfficientNetV2S,
)
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3
from keras.src.applications.mobilenet import MobileNet as MobileNet
from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
from keras.src.applications.mobilenet_v3 import (
MobileNetV3Large as MobileNetV3Large,
)
from keras.src.applications.mobilenet_v3 import (
MobileNetV3Small as MobileNetV3Small,
)
from keras.src.applications.nasnet import NASNetLarge as NASNetLarge
from keras.src.applications.nasnet import NASNetMobile as NASNetMobile
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import ResNet101 as ResNet101
from keras.src.applications.resnet import ResNet152 as ResNet152
from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg19 import VGG19 as VGG19
from keras.src.applications.xception import Xception as Xception
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.applications import convnext
from keras.api.applications import densenet
from keras.api.applications import efficientnet
from keras.api.applications import efficientnet_v2
from keras.api.applications import imagenet_utils
from keras.api.applications import inception_resnet_v2
from keras.api.applications import inception_v3
from keras.api.applications import mobilenet
from keras.api.applications import mobilenet_v2
from keras.api.applications import mobilenet_v3
from keras.api.applications import nasnet
from keras.api.applications import resnet
from keras.api.applications import resnet50
from keras.api.applications import resnet_v2
from keras.api.applications import vgg16
from keras.api.applications import vgg19
from keras.api.applications import xception
from keras.src.applications.convnext import ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge
from keras.src.applications.densenet import DenseNet121
from keras.src.applications.densenet import DenseNet169
from keras.src.applications.densenet import DenseNet201
from keras.src.applications.efficientnet import EfficientNetB0
from keras.src.applications.efficientnet import EfficientNetB1
from keras.src.applications.efficientnet import EfficientNetB2
from keras.src.applications.efficientnet import EfficientNetB3
from keras.src.applications.efficientnet import EfficientNetB4
from keras.src.applications.efficientnet import EfficientNetB5
from keras.src.applications.efficientnet import EfficientNetB6
from keras.src.applications.efficientnet import EfficientNetB7
from keras.src.applications.efficientnet_v2 import EfficientNetV2B0
from keras.src.applications.efficientnet_v2 import EfficientNetV2B1
from keras.src.applications.efficientnet_v2 import EfficientNetV2B2
from keras.src.applications.efficientnet_v2 import EfficientNetV2B3
from keras.src.applications.efficientnet_v2 import EfficientNetV2L
from keras.src.applications.efficientnet_v2 import EfficientNetV2M
from keras.src.applications.efficientnet_v2 import EfficientNetV2S
from keras.src.applications.inception_resnet_v2 import InceptionResNetV2
from keras.src.applications.inception_v3 import InceptionV3
from keras.src.applications.mobilenet import MobileNet
from keras.src.applications.mobilenet_v2 import MobileNetV2
from keras.src.applications.mobilenet_v3 import MobileNetV3Large
from keras.src.applications.mobilenet_v3 import MobileNetV3Small
from keras.src.applications.nasnet import NASNetLarge
from keras.src.applications.nasnet import NASNetMobile
from keras.src.applications.resnet import ResNet50
from keras.src.applications.resnet import ResNet101
from keras.src.applications.resnet import ResNet152
from keras.src.applications.resnet_v2 import ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2
from keras.src.applications.vgg16 import VGG16
from keras.src.applications.vgg19 import VGG19
from keras.src.applications.xception import Xception
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Normalize, Pad, PhotoMetricDistortion,
RandomAffine, RandomCenterCropPad, RandomCrop,
RandomErasing, RandomFlip, RandomShift, Resize,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, LoadAnnotations, LoadEmptyAnnotations,
LoadImageFromNDArray, LoadMultiChannelImageFromFiles,
LoadPanopticAnnotations, LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, MinIoURandomCrop, MixUp, Mosaic, Normalize,
Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp'
]
|
from collections.abc import Sequence
from langchain_core.tools import BaseTool
def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None:
"""Validate tools for single input.
Args:
class_name: Name of the class.
tools: List of tools to validate.
Raises:
ValueError: If a multi-input tool is found in tools.
"""
for tool in tools:
if not tool.is_single_input:
msg = f"{class_name} does not support multi-input tool {tool.name}."
raise ValueError(msg)
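# Hedged usage sketch; `my_tool` stands for any single-input BaseTool instance:
#
#     validate_tools_single_input("MyAgent", [my_tool])  # raises ValueError for multi-input tools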
|
from collections.abc import Sequence
from langchain_core.tools import BaseTool
def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None:
"""Validate tools for single input.
Args:
class_name: Name of the class.
tools: List of tools to validate.
Raises:
ValueError: If a multi-input tool is found in tools.
"""
for tool in tools:
if not tool.is_single_input:
raise ValueError(
f"{class_name} does not support multi-input tool {tool.name}."
)
|
"""Zapier Tool."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierNLARunAction": "langchain_community.tools",
"ZapierNLAListActions": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZapierNLAListActions",
"ZapierNLARunAction",
]
|
"""Zapier Tool."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import ZapierNLAListActions, ZapierNLARunAction
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ZapierNLARunAction": "langchain_community.tools",
"ZapierNLAListActions": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ZapierNLARunAction",
"ZapierNLAListActions",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage writer
WRITERS = Registry('writer')
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of evaluators for computing metrics
EVALUATORS = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage writer
WRITERS = Registry('writer')
|
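A brief sketch of how such a root registry is consumed, assuming `mmengine` is installed (`TOY_HOOKS` and `ToyHook` are illustrative and not part of the module above):
from mmengine.registry import Registry

TOY_HOOKS = Registry('toy hook')


@TOY_HOOKS.register_module()
class ToyHook:
    def __init__(self, interval: int = 1):
        self.interval = interval


# Modules are built from config dicts, with `type` naming the registered class.
hook = TOY_HOOKS.build(dict(type='ToyHook', interval=10))
assert hook.interval == 10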
from collections.abc import Sequence
from typing import Callable
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
MessageFormatter = Callable[[Sequence[tuple[AgentAction, str]]], list[BaseMessage]]
def create_tool_calling_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
*,
message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
"""Create an agent that uses tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
message_formatter: Formatter function to convert (AgentAction, tool output)
tuples into FunctionMessages.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
model = ChatAnthropic(model="claude-3-opus-20240229")
@tool
def magic_function(input: int) -> int:
\"\"\"Applies a magic function to an input.\"\"\"
return input + 2
tools = [magic_function]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is the value of magic_function(3)?"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
if not hasattr(llm, "bind_tools"):
raise ValueError(
"This function requires a .bind_tools method be implemented on the LLM.",
)
llm_with_tools = llm.bind_tools(tools)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
)
| prompt
| llm_with_tools
| ToolsAgentOutputParser()
)
return agent
|
from typing import Callable, List, Sequence, Tuple
from langchain_core.agents import AgentAction
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain.agents.format_scratchpad.tools import (
format_to_tool_messages,
)
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
MessageFormatter = Callable[[Sequence[Tuple[AgentAction, str]]], List[BaseMessage]]
def create_tool_calling_agent(
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
prompt: ChatPromptTemplate,
*,
message_formatter: MessageFormatter = format_to_tool_messages,
) -> Runnable:
"""Create an agent that uses tools.
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use. See Prompt section below for more on the expected
input variables.
message_formatter: Formatter function to convert (AgentAction, tool output)
tuples into FunctionMessages.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
Example:
.. code-block:: python
from langchain.agents import AgentExecutor, create_tool_calling_agent, tool
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("placeholder", "{chat_history}"),
("human", "{input}"),
("placeholder", "{agent_scratchpad}"),
]
)
model = ChatAnthropic(model="claude-3-opus-20240229")
@tool
def magic_function(input: int) -> int:
\"\"\"Applies a magic function to an input.\"\"\"
return input + 2
tools = [magic_function]
agent = create_tool_calling_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "what is the value of magic_function(3)?"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Prompt:
The agent prompt must have an `agent_scratchpad` key that is a
``MessagesPlaceholder``. Intermediate agent actions and tool output
messages will be passed in here.
"""
missing_vars = {"agent_scratchpad"}.difference(
prompt.input_variables + list(prompt.partial_variables)
)
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
if not hasattr(llm, "bind_tools"):
raise ValueError(
"This function requires a .bind_tools method be implemented on the LLM.",
)
llm_with_tools = llm.bind_tools(tools)
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: message_formatter(x["intermediate_steps"])
)
| prompt
| llm_with_tools
| ToolsAgentOutputParser()
)
return agent
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=data,
unsubscribe_link="https://platform.agpt.co/profile/settings",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(user_email, subject, full_message)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
        # find the template in templates/ (the .template property already includes the .html part of the name)
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(self, user_email: str, subject: str, body: str):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
)
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=data,
unsubscribe_link="https://autogpt.com/unsubscribe",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(user_email, subject, full_message)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
        # find the template in templates/ (the .template property already includes the .html part of the name)
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(self, user_email: str, subject: str, body: str):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
)
|
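A hedged sketch of the templating step behind `format_email`, using plain Jinja2 (the templates and variable names here are illustrative, not the project's actual templates or its TextFormatter API):
from jinja2 import Template

base = Template("<html><body>{{ content }}<hr>{{ unsubscribe_link }}</body></html>")
body = Template("Hi {{ name }}, your agent completed {{ runs }} runs this week.")

# Render the body first, then drop it into the base layout, mirroring how a
# subject/body/base template triple would be combined into one HTML email.
content = body.render(name="Bob", runs=3)
html = base.render(content=content, unsubscribe_link="https://example.com/unsubscribe")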
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
try:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
finally:
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
|
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype import features
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
try:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
finally:
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": features.Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
|
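A standalone sketch of the h5py access pattern `PCAMH5Reader` relies on (the file name, key, and shapes below are illustrative):
import h5py
import numpy as np

# Build a tiny toy file so the read path below is runnable end to end.
with h5py.File("toy_pcam.h5", "w") as f:
    f.create_dataset("x", data=np.zeros((4, 96, 96, 3), dtype=np.uint8))

with h5py.File("toy_pcam.h5", "r") as data:
    samples = data["x"]          # select by key, as the reader does
    for sample in samples:       # iterating yields one array per entry
        assert sample.shape == (96, 96, 3)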
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purpose, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_kaldi",
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_KALDI_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_INITIALIZED",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# Kaldi and RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# Kaldi or RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_KALDI_AVAILABLE = False
_IS_ALIGN_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_KALDI_AVAILABLE = torchaudio.lib._torchaudio.is_kaldi_available()
_IS_ALIGN_AVAILABLE = torchaudio.lib._torchaudio.is_align_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_ffmpeg"):
try:
_init_ffmpeg()
_FFMPEG_INITIALIZED = True
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
        # Since the rest of torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_kaldi = (
no_op
if _IS_KALDI_AVAILABLE
else fail_with_message(
"requires kaldi extension, but TorchAudio is not compiled with it. Please build TorchAudio with kaldi support."
)
)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
fail_if_no_ffmpeg = no_op if _FFMPEG_INITIALIZED else _fail_since_no_ffmpeg
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
fail_if_no_align = (
no_op
if _IS_ALIGN_AVAILABLE
else fail_with_message(
"Requires alignment extension, but TorchAudio is not compiled with it. \
Please build TorchAudio with alignment support."
)
)
|
import logging
import os
import sys
from torchaudio._internal.module_utils import fail_with_message, is_module_available, no_op
from .utils import _check_cuda_version, _fail_since_no_ffmpeg, _init_dll_path, _init_ffmpeg, _init_sox, _load_lib
_LG = logging.getLogger(__name__)
# Note:
# `_check_cuda_version` is not meant to be used by regular users.
# Builder uses it for debugging purpose, so we export it.
# https://github.com/pytorch/builder/blob/e2e4542b8eb0bdf491214451a1a4128bd606cce2/test/smoke_test/smoke_test.py#L80
__all__ = [
"fail_if_no_kaldi",
"fail_if_no_sox",
"fail_if_no_ffmpeg",
"_check_cuda_version",
"_IS_TORCHAUDIO_EXT_AVAILABLE",
"_IS_KALDI_AVAILABLE",
"_IS_RIR_AVAILABLE",
"_SOX_INITIALIZED",
"_FFMPEG_INITIALIZED",
]
if os.name == "nt" and (3, 8) <= sys.version_info < (3, 9):
_init_dll_path()
# When the extension module is built, we initialize it.
# In case of an error, we do not catch the failure as it suggests there is something
# wrong with the installation.
_IS_TORCHAUDIO_EXT_AVAILABLE = is_module_available("torchaudio.lib._torchaudio")
# Kaldi and RIR features are implemented in _torchaudio extension, but they can be individually
# turned on/off at build time. Available means that _torchaudio is loaded properly, and
# Kaldi or RIR features are found there.
_IS_RIR_AVAILABLE = False
_IS_KALDI_AVAILABLE = False
if _IS_TORCHAUDIO_EXT_AVAILABLE:
_load_lib("libtorchaudio")
import torchaudio.lib._torchaudio # noqa
_check_cuda_version()
_IS_RIR_AVAILABLE = torchaudio.lib._torchaudio.is_rir_available()
_IS_KALDI_AVAILABLE = torchaudio.lib._torchaudio.is_kaldi_available()
# Similar to libtorchaudio, sox-related features should be importable when present.
#
# Note: This will change in the future when sox is dynamically linked.
# At that point, this initialization should handle the case where
# sox integration is built but libsox is not found.
_SOX_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_sox"):
_init_sox()
_SOX_INITIALIZED = True
# Initialize FFmpeg-related features
_FFMPEG_INITIALIZED = False
if is_module_available("torchaudio.lib._torchaudio_ffmpeg"):
try:
_init_ffmpeg()
_FFMPEG_INITIALIZED = True
except Exception:
# The initialization of FFmpeg extension will fail if supported FFmpeg
# libraries are not found in the system.
        # Since the rest of torchaudio works without it, we do not report the
# error here.
# The error will be raised when user code attempts to use these features.
_LG.debug("Failed to initialize ffmpeg bindings", exc_info=True)
fail_if_no_kaldi = (
no_op
if _IS_KALDI_AVAILABLE
else fail_with_message(
"requires kaldi extension, but TorchAudio is not compiled with it. Please build TorchAudio with kaldi support."
)
)
fail_if_no_sox = (
no_op
if _SOX_INITIALIZED
else fail_with_message(
"requires sox extension, but TorchAudio is not compiled with it. Please build TorchAudio with libsox support."
)
)
fail_if_no_ffmpeg = no_op if _FFMPEG_INITIALIZED else _fail_since_no_ffmpeg
fail_if_no_rir = (
no_op
if _IS_RIR_AVAILABLE
else fail_with_message(
"requires RIR extension, but TorchAudio is not compiled with it. Please build TorchAudio with RIR support."
)
)
|
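A generic, dependency-free sketch of the `no_op` / `fail_with_message` guard pattern used above (this reimplementation is illustrative, not torchaudio's actual helpers):
import functools


def fail_with_message(message: str):
    # Return a decorator that replaces the function with one raising at call time.
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            raise RuntimeError(f"{func.__name__} {message}")
        return wrapped
    return decorator


def no_op(func):
    return func


_BACKEND_AVAILABLE = False
guard = no_op if _BACKEND_AVAILABLE else fail_with_message("requires an optional extension that is not installed.")


@guard
def load_audio(path: str):
    """Placeholder for a backend-dependent function."""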
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
import logging
import os
import sys
import tarfile
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approximate maximum size of the corpus. Pass 100 as the second parameter and the corpus will contain roughly 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath
)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
passage = passage
if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
|
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
import logging
import sys
import os
import tarfile
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approximate maximum size of the corpus. Pass 100 as the second parameter and the corpus will contain roughly 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath
)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
passage = passage
if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of a
VideoNdArray (`VideoLoadResult.video`), an AudioNdArray
(`VideoLoadResult.audio`) and an NdArray containing the key frame indices
(`VideoLoadResult.key_frame_indices`).
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
from docarray.typing import VideoUrl
import numpy as np
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(video_url="toydata/mp_.mp4")
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, np.ndarray)
assert isinstance(audio, np.ndarray)
assert isinstance(key_frame_indices, np.ndarray)
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: a VideoLoadResult instance with video, audio and keyframe indices
"""
import av
with av.open(BytesIO(self), **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of a
VideoNdArray (`VideoLoadResult.video`), an AudioNdArray
(`VideoLoadResult.audio`) and an NdArray containing the key frame indices
(`VideoLoadResult.key_frame_indices`).
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDocument
from docarray.typing import VideoUrl
import numpy as np
class MyDoc(BaseDocument):
video_url: VideoUrl
doc = MyDoc(video_url="toydata/mp_.mp4")
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, np.ndarray)
assert isinstance(audio, np.ndarray)
assert isinstance(key_frame_indices, np.ndarray)
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described in:
https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open
:return: a VideoLoadResult instance with video, audio and keyframe indices
"""
import av
with av.open(BytesIO(self), **kwargs) as container:
audio_frames = []
video_frames = []
keyframe_indices = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
video_frames.append(frame.to_ndarray(format='rgb24'))
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
import logging
import aiohttp
from fastapi import APIRouter
from backend.util.settings import Settings
from .models import TurnstileVerifyRequest, TurnstileVerifyResponse
logger = logging.getLogger(__name__)
router = APIRouter()
settings = Settings()
@router.post("/verify", response_model=TurnstileVerifyResponse)
async def verify_turnstile_token(
request: TurnstileVerifyRequest,
) -> TurnstileVerifyResponse:
"""
Verify a Cloudflare Turnstile token.
This endpoint verifies a token returned by the Cloudflare Turnstile challenge
on the client side. It returns whether the verification was successful.
"""
logger.info(f"Verifying Turnstile token for action: {request.action}")
return await verify_token(request)
async def verify_token(request: TurnstileVerifyRequest) -> TurnstileVerifyResponse:
"""
Verify a Cloudflare Turnstile token by making a request to the Cloudflare API.
"""
# Get the secret key from settings
turnstile_secret_key = settings.secrets.turnstile_secret_key
turnstile_verify_url = settings.secrets.turnstile_verify_url
if not turnstile_secret_key:
logger.error(
"Turnstile secret key missing. Set TURNSTILE_SECRET_KEY to enable verification."
)
return TurnstileVerifyResponse(
success=False,
error="CONFIGURATION_ERROR",
challenge_timestamp=None,
hostname=None,
action=None,
)
try:
async with aiohttp.ClientSession() as session:
payload = {
"secret": turnstile_secret_key,
"response": request.token,
}
if request.action:
payload["action"] = request.action
logger.debug(f"Verifying Turnstile token with action: {request.action}")
async with session.post(
turnstile_verify_url,
data=payload,
timeout=aiohttp.ClientTimeout(total=10),
) as response:
if response.status != 200:
error_text = await response.text()
logger.error(f"Turnstile API error: {error_text}")
return TurnstileVerifyResponse(
success=False,
error=f"API_ERROR: {response.status}",
challenge_timestamp=None,
hostname=None,
action=None,
)
data = await response.json()
logger.debug(f"Turnstile API response: {data}")
# Parse the response and return a structured object
return TurnstileVerifyResponse(
success=data.get("success", False),
error=(
data.get("error-codes", None)[0]
if data.get("error-codes")
else None
),
challenge_timestamp=data.get("challenge_timestamp"),
hostname=data.get("hostname"),
action=data.get("action"),
)
except aiohttp.ClientError as e:
logger.error(f"Connection error to Turnstile API: {str(e)}")
return TurnstileVerifyResponse(
success=False,
error=f"CONNECTION_ERROR: {str(e)}",
challenge_timestamp=None,
hostname=None,
action=None,
)
except Exception as e:
logger.error(f"Unexpected error in Turnstile verification: {str(e)}")
return TurnstileVerifyResponse(
success=False,
error=f"UNEXPECTED_ERROR: {str(e)}",
challenge_timestamp=None,
hostname=None,
action=None,
)
|
import logging
import aiohttp
from fastapi import APIRouter
from backend.util.settings import Settings
from .models import TurnstileVerifyRequest, TurnstileVerifyResponse
logger = logging.getLogger(__name__)
router = APIRouter()
settings = Settings()
@router.post("/verify", response_model=TurnstileVerifyResponse)
async def verify_turnstile_token(
request: TurnstileVerifyRequest,
) -> TurnstileVerifyResponse:
"""
Verify a Cloudflare Turnstile token.
This endpoint verifies a token returned by the Cloudflare Turnstile challenge
on the client side. It returns whether the verification was successful.
"""
logger.info(f"Verifying Turnstile token for action: {request.action}")
return await verify_token(request)
async def verify_token(request: TurnstileVerifyRequest) -> TurnstileVerifyResponse:
"""
Verify a Cloudflare Turnstile token by making a request to the Cloudflare API.
"""
# Get the secret key from settings
turnstile_secret_key = settings.secrets.turnstile_secret_key
turnstile_verify_url = settings.secrets.turnstile_verify_url
if not turnstile_secret_key:
logger.error("Turnstile secret key is not configured")
return TurnstileVerifyResponse(
success=False,
error="CONFIGURATION_ERROR",
challenge_timestamp=None,
hostname=None,
action=None,
)
try:
async with aiohttp.ClientSession() as session:
payload = {
"secret": turnstile_secret_key,
"response": request.token,
}
if request.action:
payload["action"] = request.action
logger.debug(f"Verifying Turnstile token with action: {request.action}")
async with session.post(
turnstile_verify_url,
data=payload,
timeout=aiohttp.ClientTimeout(total=10),
) as response:
if response.status != 200:
error_text = await response.text()
logger.error(f"Turnstile API error: {error_text}")
return TurnstileVerifyResponse(
success=False,
error=f"API_ERROR: {response.status}",
challenge_timestamp=None,
hostname=None,
action=None,
)
data = await response.json()
logger.debug(f"Turnstile API response: {data}")
# Parse the response and return a structured object
return TurnstileVerifyResponse(
success=data.get("success", False),
error=(
data.get("error-codes", None)[0]
if data.get("error-codes")
else None
),
challenge_timestamp=data.get("challenge_timestamp"),
hostname=data.get("hostname"),
action=data.get("action"),
)
except aiohttp.ClientError as e:
logger.error(f"Connection error to Turnstile API: {str(e)}")
return TurnstileVerifyResponse(
success=False,
error=f"CONNECTION_ERROR: {str(e)}",
challenge_timestamp=None,
hostname=None,
action=None,
)
except Exception as e:
logger.error(f"Unexpected error in Turnstile verification: {str(e)}")
return TurnstileVerifyResponse(
success=False,
error=f"UNEXPECTED_ERROR: {str(e)}",
challenge_timestamp=None,
hostname=None,
action=None,
)
|
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
import os
import fsspec
import pytest
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from datasets.utils._hf_hub_fixes import dataset_info as hf_api_dataset_info
from .utils import require_lz4, require_zstandard
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api_dataset_info(hf_api, hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
|
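A small sketch of the chained-URL convention `test_fs_isfile` exercises, i.e. `zip://<member>::<archive>` (the archive built below is illustrative):
import zipfile
import fsspec

# Create a toy archive containing the member file the URL refers to.
with zipfile.ZipFile("archive.zip", "w") as zf:
    zf.writestr("dataset.jsonl", '{"a": 1}\n')

# fsspec resolves the chained URL: open `dataset.jsonl` inside `archive.zip`.
with fsspec.open("zip://dataset.jsonl::archive.zip", "rt") as f:
    assert f.read().startswith('{"a"')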
"""
This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
and creates parallel-sentences TSV files that can be used to extend existing sentence embedding models to new languages.
WikiMatrix mined parallel sentences from Wikipedia in various languages.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import gzip
import os
import sentence_transformers.util
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
num_dev_sentences = 1000 # Number of sentences we want to use for development
threshold = 1.075 # Only use sentences with a LASER similarity score above the threshold
download_url = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/"
download_folder = "../datasets/WikiMatrix/"
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(download_folder), exist_ok=True)
os.makedirs(parallel_sentences_folder, exist_ok=True)
for source_lang in source_languages:
for target_lang in target_languages:
filename_train = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
filename_dev = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
if not os.path.exists(filename_train) and not os.path.exists(filename_dev):
langs_ordered = sorted([source_lang, target_lang])
wikimatrix_filename = "WikiMatrix.{}-{}.tsv.gz".format(*langs_ordered)
wikimatrix_filepath = os.path.join(download_folder, wikimatrix_filename)
if not os.path.exists(wikimatrix_filepath):
print("Download", download_url + wikimatrix_filename)
try:
sentence_transformers.util.http_get(download_url + wikimatrix_filename, wikimatrix_filepath)
except Exception:
print("Was not able to download", download_url + wikimatrix_filename)
continue
if not os.path.exists(wikimatrix_filepath):
continue
train_sentences = []
dev_sentences = []
dev_sentences_set = set()
extract_dev_sentences = True
with gzip.open(wikimatrix_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
score, sent1, sent2 = line.strip().split("\t")
sent1 = sent1.strip()
sent2 = sent2.strip()
score = float(score)
if score < threshold:
break
if sent1 == sent2:
continue
if langs_ordered.index(source_lang) == 1: # Swap, so that src lang is sent1
sent1, sent2 = sent2, sent1
# Avoid duplicates in development set
if sent1 in dev_sentences_set or sent2 in dev_sentences_set:
continue
if extract_dev_sentences:
dev_sentences.append([sent1, sent2])
dev_sentences_set.add(sent1)
dev_sentences_set.add(sent2)
if len(dev_sentences) >= num_dev_sentences:
extract_dev_sentences = False
else:
train_sentences.append([sent1, sent2])
print("Write", len(dev_sentences), "dev sentences", filename_dev)
with gzip.open(filename_dev, "wt", encoding="utf8") as fOut:
for sents in dev_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("Write", len(train_sentences), "train sentences", filename_train)
with gzip.open(filename_train, "wt", encoding="utf8") as fOut:
for sents in train_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("---DONE---")
|
"""
This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
and creates parallel-sentences TSV files that can be used to extend existing sentence embedding models to new languages.
WikiMatrix mined parallel sentences from Wikipedia in various languages.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
import csv
from tqdm.autonotebook import tqdm
source_languages = set(['en']) #Languages our (monolingual) teacher model understands
target_languages = set(['de', 'es', 'it', 'fr', 'ar', 'tr']) #New languages we want to extend to
num_dev_sentences = 1000 #Number of sentences we want to use for development
threshold = 1.075 #Only use sentences with a LASER similarity score above the threshold
download_url = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/"
download_folder = "../datasets/WikiMatrix/"
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(download_folder), exist_ok=True)
os.makedirs(parallel_sentences_folder, exist_ok=True)
for source_lang in source_languages:
for target_lang in target_languages:
filename_train = os.path.join(parallel_sentences_folder, "WikiMatrix-{}-{}-train.tsv.gz".format(source_lang, target_lang))
filename_dev = os.path.join(parallel_sentences_folder, "WikiMatrix-{}-{}-dev.tsv.gz".format(source_lang, target_lang))
if not os.path.exists(filename_train) and not os.path.exists(filename_dev):
langs_ordered = sorted([source_lang, target_lang])
wikimatrix_filename = "WikiMatrix.{}-{}.tsv.gz".format(*langs_ordered)
wikimatrix_filepath = os.path.join(download_folder, wikimatrix_filename)
if not os.path.exists(wikimatrix_filepath):
print("Download", download_url+wikimatrix_filename)
try:
sentence_transformers.util.http_get(download_url+wikimatrix_filename, wikimatrix_filepath)
except:
print("Was not able to download", download_url+wikimatrix_filename)
continue
if not os.path.exists(wikimatrix_filepath):
continue
train_sentences = []
dev_sentences = []
dev_sentences_set = set()
extract_dev_sentences = True
with gzip.open(wikimatrix_filepath, 'rt', encoding='utf8') as fIn:
for line in fIn:
score, sent1, sent2 = line.strip().split('\t')
sent1 = sent1.strip()
sent2 = sent2.strip()
score = float(score)
if score < threshold:
break
if sent1 == sent2:
continue
if langs_ordered.index(source_lang) == 1: #Swap, so that src lang is sent1
sent1, sent2 = sent2, sent1
# Avoid duplicates in development set
if sent1 in dev_sentences_set or sent2 in dev_sentences_set:
continue
if extract_dev_sentences:
dev_sentences.append([sent1, sent2])
dev_sentences_set.add(sent1)
dev_sentences_set.add(sent2)
if len(dev_sentences) >= num_dev_sentences:
extract_dev_sentences = False
else:
train_sentences.append([sent1, sent2])
print("Write", len(dev_sentences), "dev sentences", filename_dev)
with gzip.open(filename_dev, 'wt', encoding='utf8') as fOut:
for sents in dev_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("Write", len(train_sentences), "train sentences", filename_train)
with gzip.open(filename_train, 'wt', encoding='utf8') as fOut:
for sents in train_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("---DONE---")
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecay as CosineDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts as CosineDecayRestarts,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
ExponentialDecay as ExponentialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
InverseTimeDecay as InverseTimeDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
LearningRateSchedule as LearningRateSchedule,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PiecewiseConstantDecay as PiecewiseConstantDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PolynomialDecay as PolynomialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
deserialize as deserialize,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
serialize as serialize,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers.schedules.learning_rate_schedule import CosineDecay
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
ExponentialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
InverseTimeDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
LearningRateSchedule,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PiecewiseConstantDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PolynomialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import deserialize
from keras.src.optimizers.schedules.learning_rate_schedule import serialize
|
import torch
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
|
import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
|
"""Class for a VectorStore-backed memory object."""
from collections.abc import Sequence
from typing import Any, Optional, Union
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.memory import BaseMemory
from langchain_core.vectorstores import VectorStoreRetriever
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class VectorStoreRetrieverMemory(BaseMemory):
"""Store the conversation history in a vector store and retrieves the relevant
parts of past conversation based on the input.
"""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""
memory_key: str = "history" #: :meta private:
"""Key name to locate the memories in the result of load_memory_variables."""
input_key: Optional[str] = None
"""Key name to index the inputs to load_memory_variables."""
return_docs: bool = False
"""Whether or not to return the result of querying the database directly."""
exclude_input_keys: Sequence[str] = Field(default_factory=tuple)
"""Input keys to exclude in addition to memory key when constructing the document"""
@property
def memory_variables(self) -> list[str]:
"""The list of keys emitted from the load_memory_variables method."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _documents_to_memory_variables(
self, docs: list[Document]
) -> dict[str, Union[list[Document], str]]:
result: Union[list[Document], str]
if not self.return_docs:
result = "\n".join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
def load_memory_variables(
self, inputs: dict[str, Any]
) -> dict[str, Union[list[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.invoke(query)
return self._documents_to_memory_variables(docs)
async def aload_memory_variables(
self, inputs: dict[str, Any]
) -> dict[str, Union[list[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = await self.retriever.ainvoke(query)
return self._documents_to_memory_variables(docs)
def _form_documents(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> list[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
exclude = set(self.exclude_input_keys)
exclude.add(self.memory_key)
filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude}
texts = [
f"{k}: {v}"
for k, v in list(filtered_inputs.items()) + list(outputs.items())
]
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents)
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
await self.retriever.aadd_documents(documents)
def clear(self) -> None:
"""Nothing to clear."""
async def aclear(self) -> None:
"""Nothing to clear."""
|
"""Class for a VectorStore-backed memory object."""
from collections.abc import Sequence
from typing import Any, Optional, Union
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStoreRetriever
from pydantic import Field
from langchain.memory.chat_memory import BaseMemory
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class VectorStoreRetrieverMemory(BaseMemory):
"""Store the conversation history in a vector store and retrieves the relevant
parts of past conversation based on the input.
"""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStoreRetriever object to connect to."""
memory_key: str = "history" #: :meta private:
"""Key name to locate the memories in the result of load_memory_variables."""
input_key: Optional[str] = None
"""Key name to index the inputs to load_memory_variables."""
return_docs: bool = False
"""Whether or not to return the result of querying the database directly."""
exclude_input_keys: Sequence[str] = Field(default_factory=tuple)
"""Input keys to exclude in addition to memory key when constructing the document"""
@property
def memory_variables(self) -> list[str]:
"""The list of keys emitted from the load_memory_variables method."""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _documents_to_memory_variables(
self, docs: list[Document]
) -> dict[str, Union[list[Document], str]]:
result: Union[list[Document], str]
if not self.return_docs:
result = "\n".join([doc.page_content for doc in docs])
else:
result = docs
return {self.memory_key: result}
def load_memory_variables(
self, inputs: dict[str, Any]
) -> dict[str, Union[list[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = self.retriever.invoke(query)
return self._documents_to_memory_variables(docs)
async def aload_memory_variables(
self, inputs: dict[str, Any]
) -> dict[str, Union[list[Document], str]]:
"""Return history buffer."""
input_key = self._get_prompt_input_key(inputs)
query = inputs[input_key]
docs = await self.retriever.ainvoke(query)
return self._documents_to_memory_variables(docs)
def _form_documents(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> list[Document]:
"""Format context from this conversation to buffer."""
# Each document should only include the current turn, not the chat history
exclude = set(self.exclude_input_keys)
exclude.add(self.memory_key)
filtered_inputs = {k: v for k, v in inputs.items() if k not in exclude}
texts = [
f"{k}: {v}"
for k, v in list(filtered_inputs.items()) + list(outputs.items())
]
page_content = "\n".join(texts)
return [Document(page_content=page_content)]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
self.retriever.add_documents(documents)
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
documents = self._form_documents(inputs, outputs)
await self.retriever.aadd_documents(documents)
def clear(self) -> None:
"""Nothing to clear."""
async def aclear(self) -> None:
"""Nothing to clear."""
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Query Sparsity: Active Dimensions: 43.1, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 207.0, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Sparsity Stats Query : Row Non-Zero Mean: 43.08049392700195, Row Sparsity Mean: 0.9985886216163635
Model Sparsity Stats Corpus : Row Non-Zero Mean: 206.8623504638672, Row Sparsity Mean: 0.9932224750518799
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
import contextlib
import os
import shutil
import threading
import time
import pytest
from jina import Client, DocumentArray, Executor, Flow, requests, Deployment
from jina.helper import random_port
cur_dir = os.path.dirname(__file__)
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.yaml')
try:
shutil.copy2(output_file_path, backup_file)
shutil.copy(input_file_path, output_file_path)
time.sleep(2.0)
yield
finally:
shutil.copy2(backup_file, output_file_path)
time.sleep(5.0)
def flow_run(flow, stop_event):
with flow:
flow.block(stop_event)
def deployment_run(depl, stop_event):
with depl:
depl.block(stop_event)
def test_flow_reload(tmpdir):
stop_event = threading.Event()
flow = Flow().add(
uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), reload=True
)
t = threading.Thread(target=flow_run, args=(flow, stop_event))
t.start()
time.sleep(5)
try:
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(os.path.join(cur_dir, 'exec'), 'config_alt.yml'),
os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'),
str(tmpdir),
):
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
finally:
stop_event.set()
t.join()
def test_deployment_reload(tmpdir):
stop_event = threading.Event()
depl = Deployment(
uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), reload=True
)
t = threading.Thread(target=deployment_run, args=(depl, stop_event))
t.start()
time.sleep(5)
try:
client = Client(port=depl.port, protocol=str(depl.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(os.path.join(cur_dir, 'exec'), 'config_alt.yml'),
os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'),
str(tmpdir),
):
client = Client(port=depl.port, protocol=str(depl.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
client = Client(port=depl.port, protocol=str(depl.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
finally:
stop_event.set()
t.join()
|
import contextlib
import os
import shutil
import threading
import time
import pytest
from jina import Client, DocumentArray, Executor, Flow, requests
from jina.helper import random_port
cur_dir = os.path.dirname(__file__)
@contextlib.contextmanager
def _update_file(input_file_path, output_file_path, temp_path):
backup_file = os.path.join(temp_path, 'backup.yaml')
try:
shutil.copy2(output_file_path, backup_file)
shutil.copy(input_file_path, output_file_path)
time.sleep(2.0)
yield
finally:
shutil.copy2(backup_file, output_file_path)
time.sleep(5.0)
def flow_run(flow, stop_event):
with flow:
flow.block(stop_event)
def test_deployment_reload(tmpdir):
stop_event = threading.Event()
flow = Flow().add(
uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'), reload=True
)
t = threading.Thread(target=flow_run, args=(flow, stop_event))
t.start()
time.sleep(5)
try:
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
with _update_file(
os.path.join(os.path.join(cur_dir, 'exec'), 'config_alt.yml'),
os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'),
str(tmpdir),
):
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorAfterReload'
client = Client(port=flow.port, protocol=str(flow.protocol))
res = client.post(on='/', inputs=DocumentArray.empty(10))
assert len(res) == 10
for doc in res:
assert doc.text == 'MyExecutorBeforeReload'
finally:
stop_event.set()
t.join()
|
"""Integration test for JIRA API Wrapper."""
from langchain_community.utilities.jira import JiraAPIWrapper
def test_search() -> None:
"""Test for Searching issues on JIRA"""
jql = "project = TP"
jira = JiraAPIWrapper()
output = jira.run("jql", jql)
assert "issues" in output
def test_getprojects() -> None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
output = jira.run("get_projects", "")
assert "projects" in output
def test_create_ticket() -> None:
"""Test the Create Ticket Call that Creates a Issue/Ticket on JIRA."""
issue_string = (
'{"summary": "Test Summary", "description": "Test Description",'
' "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}'
)
jira = JiraAPIWrapper()
output = jira.run("create_issue", issue_string)
assert "id" in output
assert "key" in output
def test_create_confluence_page() -> None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
create_page_dict = (
'{"space": "ROC", "title":"This is the title",'
'"body":"This is the body. You can use '
'<strong>HTML tags</strong>!"}'
)
output = jira.run("create_page", create_page_dict)
assert "type" in output
assert "page" in output
def test_other() -> None:
"""Non-exhaustive test for accessing other JIRA API methods"""
jira = JiraAPIWrapper()
issue_create_dict = """
{
"function":"issue_create",
"kwargs": {
"fields": {
"summary": "Test Summary",
"description": "Test Description",
"issuetype": {"name": "Bug"},
"project": {"key": "TP"}
}
}
}
"""
output = jira.run("other", issue_create_dict)
assert "id" in output
assert "key" in output
|
"""Integration test for JIRA API Wrapper."""
from langchain_community.utilities.jira import JiraAPIWrapper
def test_search() -> None:
"""Test for Searching issues on JIRA"""
jql = "project = TP"
jira = JiraAPIWrapper() # type: ignore[call-arg]
output = jira.run("jql", jql)
assert "issues" in output
def test_getprojects() -> None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper() # type: ignore[call-arg]
output = jira.run("get_projects", "")
assert "projects" in output
def test_create_ticket() -> None:
"""Test the Create Ticket Call that Creates a Issue/Ticket on JIRA."""
issue_string = (
'{"summary": "Test Summary", "description": "Test Description",'
' "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}'
)
jira = JiraAPIWrapper() # type: ignore[call-arg]
output = jira.run("create_issue", issue_string)
assert "id" in output
assert "key" in output
def test_create_confluence_page() -> None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper() # type: ignore[call-arg]
create_page_dict = (
'{"space": "ROC", "title":"This is the title",'
'"body":"This is the body. You can use '
'<strong>HTML tags</strong>!"}'
)
output = jira.run("create_page", create_page_dict)
assert "type" in output
assert "page" in output
def test_other() -> None:
"""Non-exhaustive test for accessing other JIRA API methods"""
jira = JiraAPIWrapper() # type: ignore[call-arg]
issue_create_dict = """
{
"function":"issue_create",
"kwargs": {
"fields": {
"summary": "Test Summary",
"description": "Test Description",
"issuetype": {"name": "Bug"},
"project": {"key": "TP"}
}
}
}
"""
output = jira.run("other", issue_create_dict)
assert "id" in output
assert "key" in output
|
__version__ = '0.13.11'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.10'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_true(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'true')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture(autouse=True)
def set_test_pip_version() -> None:
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed
del os.environ['JINA_GATEWAY_IMAGE']
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_true(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'true')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture(autouse=True)
def set_test_pip_version() -> None:
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
if 'JINA_GATEWAY_IMAGE' in os.environ: # maybe another fixture has already removed
del os.environ['JINA_GATEWAY_IMAGE']
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES and params.get('device_plugins'):
yaml = _get_yaml(template, params)
yaml = _get_deployment_with_device_plugins(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = str(value)
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
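# --- Usage sketch (editor's addition, illustration only) ---
# The template files under DEFAULT_RESOURCE_DIR contain `{placeholder}` tokens that
# `_get_yaml` substitutes with the values in `params`. The parameter names below
# ('name', 'namespace', the `data` entries) are assumptions for illustration; the
# actual templates shipped with the repository define which placeholders exist.
if __name__ == '__main__':
    config_map = get_yaml(
        'configmap',
        {
            'name': 'executor0',
            'namespace': 'demo',
            'data': {'JINA_LOG_LEVEL': 'DEBUG'},
        },
    )
    # `_get_configmap_yaml` names the resource '<name>-configmap'
    print(config_map['metadata']['name'])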
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES and params.get('device_plugins'):
yaml = _get_yaml(template, params)
yaml = _get_deployment_with_device_plugins(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = value
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
|
from typing import Any, Union
from langchain_core.utils.json import parse_json_markdown
from typing_extensions import override
from langchain.evaluation.schema import StringEvaluator
class JsonSchemaEvaluator(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input (bool): Whether the evaluator requires input.
requires_reference (bool): Whether the evaluator requires reference.
evaluation_name (str): The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **kwargs: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Args:
kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError:
msg = (
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
raise ImportError(msg)
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
elif hasattr(node, "schema") and callable(getattr(node, "schema")):
# Pydantic model
return getattr(node, "schema")()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {
"score": True,
}
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
@override
def _evaluate_strings(
self,
prediction: Union[str, Any],
input: Union[str, Any] = None,
reference: Union[str, Any] = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
|
from typing import Any, Union
from langchain_core.utils.json import parse_json_markdown
from langchain.evaluation.schema import StringEvaluator
class JsonSchemaEvaluator(StringEvaluator):
"""An evaluator that validates a JSON prediction against a JSON schema reference.
This evaluator checks if a given JSON prediction conforms to the provided JSON schema.
If the prediction is valid, the score is True (no errors). Otherwise, the score is False (error occurred).
Attributes:
requires_input (bool): Whether the evaluator requires input.
requires_reference (bool): Whether the evaluator requires reference.
evaluation_name (str): The name of the evaluation.
Examples:
evaluator = JsonSchemaEvaluator()
result = evaluator.evaluate_strings(
prediction='{"name": "John", "age": 30}',
reference={
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
}
}
)
assert result["score"] is not None
""" # noqa: E501
def __init__(self, **kwargs: Any) -> None:
"""Initializes the JsonSchemaEvaluator.
Args:
kwargs: Additional keyword arguments.
Raises:
ImportError: If the jsonschema package is not installed.
"""
super().__init__()
try:
import jsonschema # noqa: F401
except ImportError:
msg = (
"The JsonSchemaEvaluator requires the jsonschema package."
" Please install it with `pip install jsonschema`."
)
raise ImportError(msg)
@property
def requires_input(self) -> bool:
"""Returns whether the evaluator requires input."""
return False
@property
def requires_reference(self) -> bool:
"""Returns whether the evaluator requires reference."""
return True
@property
def evaluation_name(self) -> str:
"""Returns the name of the evaluation."""
return "json_schema_validation"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
elif hasattr(node, "schema") and callable(getattr(node, "schema")):
# Pydantic model
return getattr(node, "schema")()
return node
def _validate(self, prediction: Any, schema: Any) -> dict:
from jsonschema import ValidationError, validate
try:
validate(instance=prediction, schema=schema)
return {
"score": True,
}
except ValidationError as e:
return {"score": False, "reasoning": repr(e)}
def _evaluate_strings(
self,
prediction: Union[str, Any],
input: Union[str, Any] = None,
reference: Union[str, Any] = None,
**kwargs: Any,
) -> dict:
parsed_prediction = self._parse_json(prediction)
schema = self._parse_json(reference)
return self._validate(parsed_prediction, schema)
|
"""Simple Reader that reads transcript and general info of Bilibili video."""
import warnings
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class BilibiliTranscriptReader(BaseReader):
"""Bilibili Transcript and video info reader."""
@staticmethod
def get_bilibili_info_and_subs(bili_url):
import json
import re
import requests
from bilibili_api import sync, video
bvid = re.search(r"BV\w+", bili_url).group()
# Create credential object
v = video.Video(bvid=bvid)
# Get video info and basic info
video_info = sync(v.get_info())
title = video_info["title"]
desc = video_info["desc"]
# Get subtitle url
sub_list = video_info["subtitle"]["list"]
if sub_list:
sub_url = sub_list[0]["subtitle_url"]
result = requests.get(sub_url)
raw_sub_titles = json.loads(result.content)["body"]
raw_transcript = " ".join([c["content"] for c in raw_sub_titles])
# Add basic video info to transcript
return (
f"Video Title: {title}, description: {desc}\nTranscript:"
f" {raw_transcript}"
)
else:
raw_transcript = ""
warnings.warn(
f"No subtitles found for video: {bili_url}. Return Empty transcript."
)
return raw_transcript
def load_data(self, video_urls: List[str], **load_kwargs: Any) -> List[Document]:
"""
Load auto generated Video Transcripts from Bilibili, including additional metadata.
Args:
video_urls (List[str]): List of Bilibili links for which transcripts are to be read.
Returns:
List[Document]: A list of Document objects, each containing the transcript for a Bilibili video.
"""
results = []
for bili_url in video_urls:
try:
transcript = self.get_bilibili_info_and_subs(bili_url)
results.append(Document(text=transcript))
except Exception as e:
warnings.warn(
f"Error loading transcript for video {bili_url}: {e!s}. Skipping"
" video."
)
return results
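# --- Usage sketch (editor's addition, illustration only) ---
# The URL below is a placeholder, not a real video; replace it with a Bilibili link
# containing a valid BV id. Requires the `bilibili_api` package to be installed.
if __name__ == "__main__":
    reader = BilibiliTranscriptReader()
    documents = reader.load_data(
        video_urls=["https://www.bilibili.com/video/BVxxxxxxxxxx"]
    )
    print(f"Loaded {len(documents)} document(s)")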
|
"""Simple Reader that reads transcript and general info of Bilibili video."""
import warnings
from typing import Any, List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class BilibiliTranscriptReader(BaseReader):
"""Bilibili Transcript and video info reader."""
@staticmethod
def get_bilibili_info_and_subs(bili_url):
import json
import re
import requests
from bilibili_api import sync, video
bvid = re.search(r"BV\w+", bili_url).group()
# Create credential object
v = video.Video(bvid=bvid)
# Get video info and basic info
video_info = sync(v.get_info())
title = video_info["title"]
desc = video_info["desc"]
# Get subtitle url
sub_list = video_info["subtitle"]["list"]
if sub_list:
sub_url = sub_list[0]["subtitle_url"]
result = requests.get(sub_url)
raw_sub_titles = json.loads(result.content)["body"]
raw_transcript = " ".join([c["content"] for c in raw_sub_titles])
# Add basic video info to transcript
return (
f"Video Title: {title}, description: {desc}\nTranscript:"
f" {raw_transcript}"
)
else:
raw_transcript = ""
warnings.warn(
f"No subtitles found for video: {bili_url}. Return Empty transcript."
)
return raw_transcript
def load_data(self, video_urls: List[str], **load_kwargs: Any) -> List[Document]:
"""
Load auto generated Video Transcripts from Bilibili, including additional metadata.
Args:
video_urls (List[str]): List of Bilibili links for which transcripts are to be read.
Returns:
List[Document]: A list of Document objects, each containing the transcript for a Bilibili video.
"""
results = []
for bili_url in video_urls:
try:
transcript = self.get_bilibili_info_and_subs(bili_url)
results.append(Document(text=transcript))
except Exception as e:
warnings.warn(
f"Error loading transcript for video {bili_url}: {e!s}. Skipping"
" video."
)
return results
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
traversal_paths: str = 'r',
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = False,
*args,
**kwargs,
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
:param download_model: whether to download the model at start-up
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.device = device
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
script_name = 'scripts/download_full.sh'
if 'Partial' in self.model_path:
script_name = 'scripts/download_partial.sh'
subprocess.call(['sh', script_name], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=self.model_path).to(self.device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs,
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of size (n,) or (2, n).
            The `blob` contains audio time-series data. Additionally, the
            `tags` of each `Document` must contain a `sample_rate` field
            holding the sample rate of the audio data. The `sample_rate` must be a positive
            scalar value.
        :param parameters: dictionary that can override the default `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.traverse_flat(traversal_paths).batch(batch_size):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, dict(d.tags).get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise NotImplementedError(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
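# --- Usage sketch (editor's addition, illustration only) ---
# Assumes the AudioCLIP checkpoint is already available at the default `model_path`
# and that this module is executed in its package context (e.g. via `python -m ...`),
# since it uses a relative import. The fake one-second waveform below only illustrates
# the expected input format: a mono `blob` plus a `sample_rate` tag.
if __name__ == '__main__':
    from jina import Document

    waveform = np.random.randn(44100).astype(np.float32)  # 1 s of random "audio"
    doc = Document(blob=waveform, tags={'sample_rate': 44100})
    encoder = AudioCLIPEncoder()
    encoder.encode(docs=DocumentArray([doc]))
    print(doc.embedding.shape)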
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
traversal_paths: str = 'r',
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = False,
*args,
**kwargs,
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
:param download_model: whether to download the model at start-up
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.device = device
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
script_name = 'scripts/download_full.sh'
if 'Partial' in self.model_path:
script_name = 'scripts/download_partial.sh'
subprocess.call(['sh', script_name], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=self.model_path).to(self.device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs,
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of size (n,) or (2, n).
            The `blob` contains audio time-series data. Additionally, the
            `tags` of each `Document` must contain a `sample_rate` field
            holding the sample rate of the audio data. The `sample_rate` must be a positive
            scalar value.
        :param parameters: dictionary that can override the default `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.traverse_flat(traversal_paths).batch(batch_size):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, dict(d.tags).get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
"""
Each spoke runs in an isolated process. We leverage the seccomp and setrlimit system utilities to restrict access to system calls and set limits on the resources a process can consume. To implement them, we define several helper functions here, which can be configured to meet specific security or system requirements for different use scenarios or apps.
"""
import resource
import tldextract
import platform
# Set timeout for spoke execution
TIMEOUT = 180
# Set the memory, cpu and write limits
# These are app-specific and can be adjusted as needed
MEMORY_LIMIT = resource.getrlimit(resource.RLIMIT_AS)[1] # 10240 * 1024 * 1024
CPU_TIME_LIMIT = resource.getrlimit(resource.RLIMIT_CPU)[1] # 2 * 60
WRITE_LIMIT = resource.getrlimit(resource.RLIMIT_FSIZE)[1] # 10240 * 1024 * 1024
# Set the allowed root domains
# This is a list of root domains (eTLD+1) that the app is allowed to access
allowed_domains = ["localhost"]
def get_root_domain(url):
"""
Extract the root domain from a given URL.
Args:
url (str): The URL to extract the root domain from.
Returns:
str: The root domain of the URL.
"""
extracted = tldextract.extract(url)
return f"{extracted.domain}.{extracted.suffix}"
def is_request_allowed(url):
"""
Check if a request to a given URL is allowed based on the root domain.
Args:
url (str): The URL to check.
Returns:
bool: True if the request is allowed, False otherwise.
"""
root_domain = get_root_domain(url)
return root_domain in allowed_domains
def set_mem_limit():
"""
Set the CPU time, maximum virtual memory, and write limits for the process.
"""
# virtual memory
resource.setrlimit(resource.RLIMIT_AS, (MEMORY_LIMIT, MEMORY_LIMIT))
# cpu time
resource.setrlimit(resource.RLIMIT_CPU, (CPU_TIME_LIMIT, CPU_TIME_LIMIT))
# write limit i.e. don't allow an infinite stream to stdout/stderr
resource.setrlimit(resource.RLIMIT_FSIZE, (WRITE_LIMIT, WRITE_LIMIT))
# seccomp only works for Linux
if platform.system() == "Linux":
import pyseccomp as seccomp
def drop_perms():
"""
Set restrictions on system calls using seccomp for Linux.
The restrictions can be adjusted as needed based on the app's specifications.
"""
# Create a SyscallFilter instance with ALLOW as the default action
filter = seccomp.SyscallFilter(seccomp.ALLOW)
# load the filter in the kernel
filter.load()
else:
def drop_perms():
"""
Define a placeholder function for non-Linux platforms to restrict system calls.
"""
|
"""
Each spoke runs in an isolated process. We leverage the seccomp and setrlimit system utilities to restrict access to system calls and set limits on the resources a process can consume. To implement them, we define several helper functions here, which can be configured to meet specific security or system requirements for different use scenarios or apps.
"""
import resource
import tldextract
import platform
# Set timeout for spoke execution
TIMEOUT = 180
# Set the memory, cpu and write limits
# These are app-specific and can be adjusted as needed
MEMORY_LIMIT = resource.getrlimit(resource.RLIMIT_AS)[1] # 10240 * 1024 * 1024
CPU_TIME_LIMIT = resource.getrlimit(resource.RLIMIT_CPU)[1] # 2 * 60
WRITE_LIMIT = resource.getrlimit(resource.RLIMIT_FSIZE)[1] # 10240 * 1024 * 1024
# Set the allowed root domains
# This is a list of root domains (eTLD+1) that the app is allowed to access
allowed_domains = ["localhost"]
def get_root_domain(url):
"""
Extract the root domain from a given URL.
Args:
url (str): The URL to extract the root domain from.
Returns:
str: The root domain of the URL.
"""
extracted = tldextract.extract(url)
return f"{extracted.domain}.{extracted.suffix}"
def is_request_allowed(url):
"""
Check if a request to a given URL is allowed based on the root domain.
Args:
url (str): The URL to check.
Returns:
bool: True if the request is allowed, False otherwise.
"""
root_domain = get_root_domain(url)
return root_domain in allowed_domains
def set_mem_limit():
"""
Set the CPU time, maximum virtual memory, and write limits for the process.
"""
# virtual memory
resource.setrlimit(resource.RLIMIT_AS, (MEMORY_LIMIT, MEMORY_LIMIT))
# cpu time
resource.setrlimit(resource.RLIMIT_CPU, (CPU_TIME_LIMIT, CPU_TIME_LIMIT))
# write limit i.e. don't allow an infinite stream to stdout/stderr
resource.setrlimit(resource.RLIMIT_FSIZE, (WRITE_LIMIT, WRITE_LIMIT))
# seccomp only works for Linux
if platform.system() == "Linux":
import pyseccomp as seccomp
def drop_perms():
"""
Set restrictions on system calls using seccomp for Linux.
The restrictions can be adjusted as needed based on the app's specifications.
"""
# Create a SyscallFilter instance with ALLOW as the default action
filter = seccomp.SyscallFilter(seccomp.ALLOW)
# load the filter in the kernel
filter.load()
else:
def drop_perms():
"""
Define a placeholder function for non-Linux platforms to restrict system calls.
"""
|
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
    The default values expect a tab-separated file whose first and second columns contain the sentence pair and whose third column contains the score (0...1). The default config normalizes scores from 0...5 to 0...1.
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
filepath, encoding="utf-8"
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
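# Illustrative sketch (not part of the original module): assumes the STS benchmark
# files (e.g. sts-train.csv, possibly gzipped) have been downloaded into a local
# folder; the path and filename below are placeholders.
if __name__ == "__main__":
    reader = STSBenchmarkDataReader("datasets/stsbenchmark")
    train_examples = reader.get_examples("sts-train.csv", max_examples=5)
    for example in train_examples:
        print(example.texts, example.label)  # sentence pair and its 0...1 score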
|
from . import InputExample
import csv
import gzip
import os
class STSDataReader:
"""Reads in the STS dataset. Each line contains two sentences (s1_col_idx, s2_col_idx) and one label (score_col_idx)
Default values expects a tab separated file with the first & second column the sentence pair and third column the score (0...1). Default config normalizes scores from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
self.dataset_folder = dataset_folder
self.score_col_idx = score_col_idx
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.delimiter = delimiter
self.quoting = quoting
self.normalize_scores = normalize_scores
self.min_score = min_score
self.max_score = max_score
def get_examples(self, filename, max_examples=0):
"""filename specified which data split to use (train.csv, dev.csv, test.csv)."""
filepath = os.path.join(self.dataset_folder, filename)
with gzip.open(filepath, "rt", encoding="utf8") if filename.endswith(".gz") else open(
filepath, encoding="utf-8"
) as fIn:
data = csv.reader(fIn, delimiter=self.delimiter, quoting=self.quoting)
examples = []
for id, row in enumerate(data):
score = float(row[self.score_col_idx])
if self.normalize_scores: # Normalize to a 0...1 value
score = (score - self.min_score) / (self.max_score - self.min_score)
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
examples.append(InputExample(guid=filename + str(id), texts=[s1, s2], label=score))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
class STSBenchmarkDataReader(STSDataReader):
"""Reader especially for the STS benchmark dataset. There, the sentences are in column 5 and 6, the score is in column 4.
Scores are normalized from 0...5 to 0...1
"""
def __init__(
self,
dataset_folder,
s1_col_idx=5,
s2_col_idx=6,
score_col_idx=4,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize_scores=True,
min_score=0,
max_score=5,
):
super().__init__(
dataset_folder=dataset_folder,
s1_col_idx=s1_col_idx,
s2_col_idx=s2_col_idx,
score_col_idx=score_col_idx,
delimiter=delimiter,
quoting=quoting,
normalize_scores=normalize_scores,
min_score=min_score,
max_score=max_score,
)
|
import csv
import logging
import os
from typing import List
import numpy as np
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CEBinaryAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with a single output. It measures the
    accuracy of the predicted class vs. the gold labels. It uses a fixed threshold to determine the label (0 vs 1).
    See CEBinaryClassificationEvaluator for an evaluator that automatically determines the optimal threshold.
"""
def __init__(
self,
sentence_pairs: List[List[str]],
labels: List[int],
name: str = "",
threshold: float = 0.5,
write_csv: bool = True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.threshold = threshold
self.csv_file = "CEBinaryAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = pred_scores > self.threshold
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
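# Illustrative sketch (not part of the original module): evaluates a published
# binary CrossEncoder on two toy pairs. The checkpoint name is only an example,
# and downloading it requires network access.
if __name__ == "__main__":
    from sentence_transformers import CrossEncoder

    model = CrossEncoder("cross-encoder/qnli-distilroberta-base")
    pairs = [
        ["A man is eating food.", "A man eats something."],
        ["A man is eating food.", "A girl is playing guitar."],
    ]
    labels = [1, 0]
    evaluator = CEBinaryAccuracyEvaluator(pairs, labels, name="toy")
    print(evaluator(model))  # fraction of pairs classified correctly at threshold 0.5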
|
import logging
import os
import csv
from typing import List
from ... import InputExample
import numpy as np
logger = logging.getLogger(__name__)
class CEBinaryAccuracyEvaluator:
"""
This evaluator can be used with the CrossEncoder class.
    It is designed for CrossEncoders with a single output. It measures the
    accuracy of the predicted class vs. the gold labels. It uses a fixed threshold to determine the label (0 vs 1).
    See CEBinaryClassificationEvaluator for an evaluator that automatically determines the optimal threshold.
"""
def __init__(
self,
sentence_pairs: List[List[str]],
labels: List[int],
name: str = "",
threshold: float = 0.5,
write_csv: bool = True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.name = name
self.threshold = threshold
self.csv_file = "CEBinaryAccuracyEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Accuracy"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryAccuracyEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
pred_labels = pred_scores > self.threshold
assert len(pred_labels) == len(self.labels)
acc = np.sum(pred_labels == self.labels) / len(self.labels)
logger.info("Accuracy: {:.2f}".format(acc * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc])
return acc
|
_base_ = 'yolact_r50_1x8_coco.py'
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(lr=8e-3),
clip_grad=dict(max_norm=35, norm_type=2))
# learning rate
max_epochs = 55
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 42, 49, 52],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = 'yolact_r50_1x8_coco.py'
optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[20, 42, 49, 52])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
"""
========================
Decision Tree Regression
========================
In this example, we demonstrate the effect of changing the maximum depth of a
decision tree on how it fits to the data. We perform this once on a 1D regression
task and once on a multi-output regression task.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Decision Tree on a 1D Regression Task
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Here we fit a tree on a 1D regression task.
#
# The :ref:`decision tree <tree>` is
# used to fit a sine curve with additional noisy observations. As a result, it
# learns local linear regressions approximating the sine curve.
#
# We can see that if the maximum depth of the tree (controlled by the
# `max_depth` parameter) is set too high, the decision tree learns overly fine
# details of the training data and fits the noise, i.e. it overfits.
#
# Create a random 1D dataset
# --------------------------
import numpy as np
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# %%
# Fit regression model
# --------------------
# Here we fit two models with different maximum depths
from sklearn.tree import DecisionTreeRegressor
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# %%
# Predict
# -------
# Get predictions on the test set
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# %%
# Plot the results
# ----------------
import matplotlib.pyplot as plt
plt.figure()
plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
# %%
# As you can see, the model with a depth of 5 (yellow) learns the details of the
# training data to the point that it overfits to the noise. On the other hand,
# the model with a depth of 2 (blue) learns the major tendencies in the data well
# and does not overfit. In real use cases, you need to make sure that the tree
# is not overfitting the training data, which can be done using cross-validation.
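# %%
# A minimal sketch (not part of the original example) of that cross-validation
# step: :class:`~sklearn.model_selection.GridSearchCV` scores each candidate
# ``max_depth`` on held-out folds and keeps the best one.
from sklearn.model_selection import GridSearchCV

grid_search = GridSearchCV(
    DecisionTreeRegressor(random_state=0),
    param_grid={"max_depth": [2, 3, 5, 8, None]},
    cv=5,
)
grid_search.fit(X, y)
print(grid_search.best_params_)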
# %%
# Decision Tree Regression with Multi-Output Targets
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Here the :ref:`decision tree <tree>`
# is used to simultaneously predict the noisy `x` and `y` observations of a circle
# given a single underlying feature. As a result, it learns local linear
# regressions approximating the circle.
#
# We can see that if the maximum depth of the tree (controlled by the
# `max_depth` parameter) is set too high, the decision tree learns overly fine
# details of the training data and fits the noise, i.e. it overfits.
# %%
# Create a random dataset
# -----------------------
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += 0.5 - rng.rand(20, 2)
# %%
# Fit regression model
# --------------------
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# %%
# Predict
# -------
# Get predictions on the test set
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# %%
# Plot the results
# ----------------
plt.figure()
s = 25
plt.scatter(y[:, 0], y[:, 1], c="yellow", s=s, edgecolor="black", label="data")
plt.scatter(
y_1[:, 0],
y_1[:, 1],
c="cornflowerblue",
s=s,
edgecolor="black",
label="max_depth=2",
)
plt.scatter(y_2[:, 0], y_2[:, 1], c="red", s=s, edgecolor="black", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="blue", s=s, edgecolor="black", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend(loc="best")
plt.show()
# %%
# As you can see, the higher the value of `max_depth`, the more details of the data
# are caught by the model. However, the model also overfits to the data and is
# influenced by the noise.
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and fits the noise, i.e. it overfits.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Import the necessary modules and libraries
import matplotlib.pyplot as plt
import numpy as np
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
from docarray.typing import AnyEmbedding, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDoc):
"""
Document for handling point clouds for 3D data representation.
    A point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
    sampling points on the surface of the 3D body. Compared to the mesh
    representation, the point cloud is a fixed-size ndarray (shape=(n_samples, 3)) and
    hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`),
a PointsAndColors object (`PointCloud3D.tensors`), and an AnyEmbedding
(`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
pc.second_embedding = model(pc.tensors.colors)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDoc):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensors = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes_ = mmdoc.point_cloud.url.load_bytes()
You can display your point cloud from either its url, or its tensors:
.. code-block:: python
from docarray.documents import PointCloud3D
# display from url
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.url.display()
# display from tensors
pc.tensors = pc.url.load(samples=10000)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
"""
url: Optional[PointCloud3DUrl]
tensors: Optional[PointsAndColors]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensors=PointsAndColors(points=value))
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
from docarray.typing import AnyEmbedding, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDoc):
"""
Document for handling point clouds for 3D data representation.
    A point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
    sampling points on the surface of the 3D body. Compared to the mesh
    representation, the point cloud is a fixed-size ndarray (shape=(n_samples, 3)) and
    hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`),
a PointsAndColors object (`PointCloud3D.tensors`), and an AnyEmbedding
(`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensors = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
pc.second_embedding = model(pc.tensors.colors)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDoc):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensors = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes_ = mmdoc.point_cloud.url.load_bytes()
You can display your point cloud from either its url, or its tensors:
.. code-block:: python
from docarray.documents import PointCloud3D
# display from url
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.url.display()
# display from tensors
pc.tensors = pc.url.load(samples=10000)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensors.points)
"""
url: Optional[PointCloud3DUrl]
tensors: Optional[PointsAndColors]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensors=PointsAndColors(points=value))
return super().validate(value)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'register_all_modules',
'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM', 'all_reduce_dict',
'allreduce_grads', 'reduce_mean', 'sync_random_seed', 'ConfigType',
'InstanceList', 'MultiConfig', 'OptConfigType', 'OptInstanceList',
'OptMultiConfig', 'OptPixelList', 'PixelList', 'RangeType'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .parallel import MMDataParallel, MMDistributedDataParallel
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device', 'MMDataParallel', 'MMDistributedDataParallel',
'register_all_modules', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',
'DistOptimizerHook', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType'
]
|
from collections.abc import AsyncIterator, Iterator, Sequence
from typing import (
Any,
Callable,
Optional,
TypeVar,
Union,
)
from langchain_core.stores import BaseStore
K = TypeVar("K")
V = TypeVar("V")
class EncoderBackedStore(BaseStore[K, V]):
"""Wraps a store with key and value encoders/decoders.
    Example that uses JSON for encoding/decoding:
.. code-block:: python
import json
def key_encoder(key: int) -> str:
return json.dumps(key)
def value_serializer(value: float) -> str:
return json.dumps(value)
def value_deserializer(serialized_value: str) -> float:
return json.loads(serialized_value)
# Create an instance of the abstract store
abstract_store = MyCustomStore()
# Create an instance of the encoder-backed store
store = EncoderBackedStore(
store=abstract_store,
key_encoder=key_encoder,
value_serializer=value_serializer,
value_deserializer=value_deserializer
)
# Use the encoder-backed store methods
store.mset([(1, 3.14), (2, 2.718)])
values = store.mget([1, 2]) # Retrieves [3.14, 2.718]
store.mdelete([1, 2]) # Deletes the keys 1 and 2
"""
def __init__(
self,
store: BaseStore[str, Any],
key_encoder: Callable[[K], str],
value_serializer: Callable[[V], bytes],
value_deserializer: Callable[[Any], V],
) -> None:
"""Initialize an EncodedStore."""
self.store = store
self.key_encoder = key_encoder
self.value_serializer = value_serializer
self.value_deserializer = value_deserializer
def mget(self, keys: Sequence[K]) -> list[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
values = self.store.mget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
async def amget(self, keys: Sequence[K]) -> list[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: list[str] = [self.key_encoder(key) for key in keys]
values = await self.store.amget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
def mset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
self.store.mset(encoded_pairs)
async def amset(self, key_value_pairs: Sequence[tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
await self.store.amset(encoded_pairs)
def mdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
self.store.mdelete(encoded_keys)
async def amdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
await self.store.amdelete(encoded_keys)
def yield_keys(
self, *, prefix: Optional[str] = None
) -> Union[Iterator[K], Iterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
yield from self.store.yield_keys(prefix=prefix)
async def ayield_keys(
self, *, prefix: Optional[str] = None
) -> Union[AsyncIterator[K], AsyncIterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
async for key in self.store.ayield_keys(prefix=prefix):
yield key
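# Runnable sketch (not part of the original module): the same JSON-based
# encoding as in the docstring example, backed by the in-memory store that
# ships with langchain_core.
if __name__ == "__main__":
    import json

    from langchain_core.stores import InMemoryStore

    store = EncoderBackedStore(
        store=InMemoryStore(),
        key_encoder=json.dumps,
        value_serializer=json.dumps,
        value_deserializer=json.loads,
    )
    store.mset([(1, 3.14), (2, 2.718)])
    print(store.mget([1, 2]))  # [3.14, 2.718]
    store.mdelete([1, 2])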
|
from typing import (
Any,
AsyncIterator,
Callable,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
from langchain_core.stores import BaseStore
K = TypeVar("K")
V = TypeVar("V")
class EncoderBackedStore(BaseStore[K, V]):
"""Wraps a store with key and value encoders/decoders.
    Example that uses JSON for encoding/decoding:
.. code-block:: python
import json
def key_encoder(key: int) -> str:
return json.dumps(key)
def value_serializer(value: float) -> str:
return json.dumps(value)
def value_deserializer(serialized_value: str) -> float:
return json.loads(serialized_value)
# Create an instance of the abstract store
abstract_store = MyCustomStore()
# Create an instance of the encoder-backed store
store = EncoderBackedStore(
store=abstract_store,
key_encoder=key_encoder,
value_serializer=value_serializer,
value_deserializer=value_deserializer
)
# Use the encoder-backed store methods
store.mset([(1, 3.14), (2, 2.718)])
values = store.mget([1, 2]) # Retrieves [3.14, 2.718]
store.mdelete([1, 2]) # Deletes the keys 1 and 2
"""
def __init__(
self,
store: BaseStore[str, Any],
key_encoder: Callable[[K], str],
value_serializer: Callable[[V], bytes],
value_deserializer: Callable[[Any], V],
) -> None:
"""Initialize an EncodedStore."""
self.store = store
self.key_encoder = key_encoder
self.value_serializer = value_serializer
self.value_deserializer = value_deserializer
def mget(self, keys: Sequence[K]) -> List[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: List[str] = [self.key_encoder(key) for key in keys]
values = self.store.mget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
async def amget(self, keys: Sequence[K]) -> List[Optional[V]]:
"""Get the values associated with the given keys."""
encoded_keys: List[str] = [self.key_encoder(key) for key in keys]
values = await self.store.amget(encoded_keys)
return [
self.value_deserializer(value) if value is not None else value
for value in values
]
def mset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
self.store.mset(encoded_pairs)
async def amset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
"""Set the values for the given keys."""
encoded_pairs = [
(self.key_encoder(key), self.value_serializer(value))
for key, value in key_value_pairs
]
await self.store.amset(encoded_pairs)
def mdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
self.store.mdelete(encoded_keys)
async def amdelete(self, keys: Sequence[K]) -> None:
"""Delete the given keys and their associated values."""
encoded_keys = [self.key_encoder(key) for key in keys]
await self.store.amdelete(encoded_keys)
def yield_keys(
self, *, prefix: Optional[str] = None
) -> Union[Iterator[K], Iterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
yield from self.store.yield_keys(prefix=prefix)
async def ayield_keys(
self, *, prefix: Optional[str] = None
) -> Union[AsyncIterator[K], AsyncIterator[str]]:
"""Get an iterator over keys that match the given prefix."""
# For the time being this does not return K, but str
# it's for debugging purposes. Should fix this.
async for key in self.store.ayield_keys(prefix=prefix):
yield key
|
"""Load agent."""
import contextlib
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import load_agent
from langchain.agents.types import AGENT_TO_CLASS
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
agent: Optional[AgentType] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
*,
tags: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default
to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use. If None and agent is also None,
will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
agent_kwargs: Additional keyword arguments to pass to the underlying agent.
Defaults to None.
tags: Tags to apply to the traced runs. Defaults to None.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If both `agent` and `agent_path` are specified.
ValueError: If `agent` is not a valid agent type.
ValueError: If both `agent` and `agent_path` are None.
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
msg = (
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
raise ValueError(msg)
if agent is not None:
if agent not in AGENT_TO_CLASS:
msg = (
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
raise ValueError(msg)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm,
tools,
callback_manager=callback_manager,
**agent_kwargs,
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path,
llm=llm,
tools=tools,
callback_manager=callback_manager,
)
with contextlib.suppress(NotImplementedError):
# TODO: Add tags from the serialized object directly.
tags_.append(agent_obj._agent_type)
else:
msg = (
"Somehow both `agent` and `agent_path` are None, this should never happen."
)
raise ValueError(msg)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
tags=tags_,
**kwargs,
)
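# Illustrative sketch (not part of the original module) of this deprecated
# helper. It assumes langchain-openai is installed and an OpenAI API key is set
# in the environment; any BaseLanguageModel and tool list could be substituted.
if __name__ == "__main__":
    from langchain_core.tools import tool
    from langchain_openai import ChatOpenAI

    @tool
    def word_length(word: str) -> int:
        """Return the number of characters in a word."""
        return len(word)

    executor = initialize_agent(
        tools=[word_length],
        llm=ChatOpenAI(model="gpt-4o-mini"),
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    )
    print(executor.invoke({"input": "How many letters are in 'sandbox'?"}))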
|
"""Load agent."""
import contextlib
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import load_agent
from langchain.agents.types import AGENT_TO_CLASS
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
agent: Optional[AgentType] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
*,
tags: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default
to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use. If None and agent is also None,
will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
agent_kwargs: Additional keyword arguments to pass to the underlying agent.
Defaults to None.
tags: Tags to apply to the traced runs. Defaults to None.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If both `agent` and `agent_path` are specified.
ValueError: If `agent` is not a valid agent type.
ValueError: If both `agent` and `agent_path` are None.
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
msg = (
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
raise ValueError(msg)
if agent is not None:
if agent not in AGENT_TO_CLASS:
msg = (
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
raise ValueError(msg)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
)
with contextlib.suppress(NotImplementedError):
# TODO: Add tags from the serialized object directly.
tags_.append(agent_obj._agent_type)
else:
msg = (
"Somehow both `agent` and `agent_path` are None, this should never happen."
)
raise ValueError(msg)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
tags=tags_,
**kwargs,
)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='Audio')
class Audio(BaseDocument):
"""
    Document for handling audio.
The Audio Document can contain an AudioUrl (`Audio.url`), an AudioTensor
(`Audio.tensor`), and an AnyEmbedding (`Audio.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Audio
# use it directly
audio = Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Audio, Text
from typing import Optional
# extend it
class MyAudio(Audio):
name: Optional[Text]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = Text(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Audio, Text
# compose it
        class MultiModalDoc(BaseDocument):
audio: Audio
text: Text
mmdoc = MultiModalDoc(
audio=Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor = mmdoc.audio.bytes.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[AudioBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
T = TypeVar('T', bound='Audio')
class Audio(BaseDocument):
"""
    Document for handling audio.
The Audio Document can contain an AudioUrl (`Audio.url`), an AudioTensor
(`Audio.tensor`), and an AnyEmbedding (`Audio.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Audio
# use it directly
audio = Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import Audio, Text
from typing import Optional
# extend it
class MyAudio(Audio):
name: Optional[Text]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = Text(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Audio, Text
# compose it
        class MultiModalDoc(BaseDocument):
audio: Audio
text: Text
mmdoc = MultiModalDoc(
audio=Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor = mmdoc.audio.bytes.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[AudioBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
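# A standalone sketch (not part of mmdet) of the mask-laying heuristic used in
# `_lay_masks`: higher-scoring masks are pasted first, and a later mask is
# skipped when more than `overlap_thr` of its area is already covered.
if __name__ == "__main__":
    masks = torch.tensor([
        [[1, 1, 0, 0]],   # instance A
        [[1, 1, 1, 0]],   # instance B, overlaps A on 2 of its 3 pixels
    ], dtype=torch.bool)
    id_map = torch.zeros(masks.shape[-2:], dtype=torch.long)
    overlap_thr = 0.5
    instance_id = 1
    for mask in masks:  # assumed already sorted by descending score
        area = mask.sum()
        covered = (mask & (id_map > 0)).sum()
        if covered / (area + 1e-5) > overlap_thr:
            continue  # mostly hidden behind earlier, higher-scoring masks
        id_map[mask & (id_map == 0)] = instance_id
        instance_id += 1
    print(id_map)  # instance B is dropped: tensor([[1, 1, 0, 0]])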
|
import torch
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
|
import numpy as np
from docarray.array import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import NdArray
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da._get_array_attribute('tensor')
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da._get_array_attribute('text')
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_set_attributes():
class InnerDoc(BaseDocument):
text: str
class Mmdoc(BaseDocument):
inner: InnerDoc
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(inner=InnerDoc(text=f'hello{i}')) for i in range(N))
)
list_docs = [InnerDoc(text=f'hello{i}') for i in range(N)]
da._set_array_attribute('inner', list_docs)
for doc, list_doc in zip(da, list_docs):
assert doc.inner is list_doc
def test_get_bulk_attributes():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da.tensor
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da.text
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes_document():
class InnerDoc(BaseDocument):
text: str
class Mmdoc(BaseDocument):
inner: InnerDoc
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(inner=InnerDoc(text=f'hello{i}')) for i in range(N))
)
assert isinstance(da.inner, DocumentArray)
|
import numpy as np
from docarray.array import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import NdArray
def test_get_bulk_attributes_function():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da._get_documents_attribute('tensor')
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da._get_documents_attribute('text')
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes():
class Mmdoc(BaseDocument):
text: str
tensor: NdArray
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(text=f'hello{i}', tensor=np.zeros((3, 224, 224))) for i in range(N))
)
tensors = da.tensor
assert len(tensors) == N
for tensor in tensors:
assert tensor.shape == (3, 224, 224)
texts = da.text
assert len(texts) == N
for i, text in enumerate(texts):
assert text == f'hello{i}'
def test_get_bulk_attributes_document():
class InnerDoc(BaseDocument):
text: str
class Mmdoc(BaseDocument):
inner: InnerDoc
N = 10
da = DocumentArray[Mmdoc](
(Mmdoc(inner=InnerDoc(text=f'hello{i}')) for i in range(N))
)
assert isinstance(da.inner, DocumentArray)
|
from __future__ import annotations
import logging
from typing import Literal
import torch
from torch import Tensor
from sentence_transformers.models.InputModule import InputModule
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(InputModule):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
save_in_root: bool = False
config_keys: list[str] = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
def __init__(
self,
vocab: list[str],
word_weights: dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super().__init__()
vocab = list(dict.fromkeys(vocab)) # Ensure vocab is unique
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
f"{num_unknown_words} out of {len(vocab)} words without a weighting value. Set weight to {unknown_word_weight}"
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: list[str], **kwargs) -> list[int]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: list[list[int]], pad_seq_length: int = 0
) -> dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
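# Minimal sketch (not part of the original module): with a three-word vocabulary
# and the default weight of 1 for every word, the sentence embedding is simply a
# term-count vector of size len(vocab).
if __name__ == "__main__":
    bow = BoW(vocab=["hello", "world", "test"])
    features = bow.tokenize(["hello hello world"])
    print(features["sentence_embedding"])  # tensor([[2., 1., 0.]])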
|
from __future__ import annotations
import json
import logging
import os
from typing import Literal
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: list[str],
word_weights: dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super().__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
f"{num_unknown_words} out of {len(vocab)} words without a weighting value. Set weight to {unknown_word_weight}"
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: list[str], **kwargs) -> list[int]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: list[list[int]], pad_seq_length: int = 0
) -> dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
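# Usage sketch: a minimal, illustrative way to drive the BoW module on its own.
# The vocabulary and the idf-style word weights below are made-up example values,
# not values taken from any real setup.
if __name__ == "__main__":
    vocab = ["the", "cat", "sat", "on", "mat"]
    word_weights = {"the": 0.1, "cat": 2.0, "mat": 1.5}  # hypothetical idf-like weights
    bow = BoW(vocab=vocab, word_weights=word_weights, unknown_word_weight=1.0)
    # tokenize() already returns the sentence features for this module
    features = bow.tokenize(["the cat sat on the mat", "the cat"])
    # One dimension per vocabulary word
    print(features["sentence_embedding"].shape)  # torch.Size([2, 5])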
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
def basic_encoder() -> LaserEncoder:
return LaserEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.language == 'en'
def test_no_document(basic_encoder: LaserEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
@pytest.mark.gpu
def test_encoding_gpu():
encoder = LaserEncoder(device='cuda')
docs = DocumentArray((Document(text='random text')))
encoder.encode(docs, {})
assert len(docs.get_attributes('embedding')) == 1
assert docs[0].embedding.shape == (1024,)
def test_no_text_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu(basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'language, sentence',
[
('en', 'Today is a nice day'),
('es', 'hoy es un buen día'),
('ru', 'сегодня хороший день'),
],
)
def test_languages(language: str, sentence: str, basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text=sentence)])
basic_encoder.encode(docs, {'language': language})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: LaserEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
def test_no_documents():
encoder = LaserEncoder()
docs = []
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert not docs
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: LaserEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: LaserEncoder):
docs = DocumentArray(
[
            # Different from the usual example, because these embeddings are of low quality
            # (manually verified using the laser embeddings module)
Document(id='A', text='car'),
Document(id='B', text='truck'),
Document(id='C', text='radio'),
Document(id='D', text='TV'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from laser_encoder import LaserEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='session')
def basic_encoder() -> LaserEncoder:
return LaserEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_language == 'en'
def test_no_document(basic_encoder: LaserEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
@pytest.mark.gpu
def test_encoding_gpu():
encoder = LaserEncoder(device='cuda')
docs = DocumentArray((Document(text='random text')))
encoder.encode(docs, {})
assert len(docs.get_attributes('embedding')) == 1
assert docs[0].embedding.shape == (1024,)
def test_no_text_documents(basic_encoder: LaserEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu(basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text='hello there')])
basic_encoder.encode(docs, {})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'language, sentence',
[
('en', 'Today is a nice day'),
('es', 'hoy es un buen día'),
('ru', 'сегодня хороший день'),
],
)
def test_languages(language: str, sentence: str, basic_encoder: LaserEncoder):
docs = DocumentArray([Document(text=sentence)])
basic_encoder.encode(docs, {'language': language})
assert docs[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: LaserEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
def test_no_documents():
encoder = LaserEncoder()
docs = []
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert not docs
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: LaserEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: LaserEncoder):
docs = DocumentArray(
[
            # Different from the usual example, because these embeddings are of low quality
            # (manually verified using the laser embeddings module)
Document(id='A', text='car'),
Document(id='B', text='truck'),
Document(id='C', text='radio'),
Document(id='D', text='TV'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchResize,
BatchSyncRandomResize, DetDataPreprocessor,
MultiBranchDataPreprocessor)
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor', 'BatchResize'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .data_preprocessor import (BatchFixedSizePad, BatchSyncRandomResize,
DetDataPreprocessor,
MultiBranchDataPreprocessor)
__all__ = [
'DetDataPreprocessor', 'BatchSyncRandomResize', 'BatchFixedSizePad',
'MultiBranchDataPreprocessor'
]
|
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["AlexNet", "AlexNet_Weights", "alexnet"]
class AlexNet(nn.Module):
def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once(self)
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class AlexNet_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 61100840,
"min_size": (63, 63),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
"_metrics": {
"ImageNet-1K": {
"acc@1": 56.522,
"acc@5": 79.066,
}
},
"_ops": 0.714,
"_file_size": 233.087,
"_docs": """
These weights reproduce closely the results of the paper using a simplified training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", AlexNet_Weights.IMAGENET1K_V1))
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
"""AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
.. note::
AlexNet was originally introduced in the `ImageNet Classification with
Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html>`__
paper. Our implementation is based instead on the "One weird trick"
paper above.
Args:
weights (:class:`~torchvision.models.AlexNet_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.AlexNet_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.alexnet.AlexNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/alexnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.AlexNet_Weights
:members:
"""
weights = AlexNet_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = AlexNet(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
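# Usage sketch: an illustrative forward pass through the builder above.
# `weights=None` keeps the model randomly initialized; passing
# `AlexNet_Weights.IMAGENET1K_V1` would download the pretrained checkpoint instead.
if __name__ == "__main__":
    model = alexnet(weights=None)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 1000])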
|
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["AlexNet", "AlexNet_Weights", "alexnet"]
class AlexNet(nn.Module):
def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once(self)
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class AlexNet_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 61100840,
"min_size": (63, 63),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
"_metrics": {
"ImageNet-1K": {
"acc@1": 56.522,
"acc@5": 79.066,
}
},
"_ops": 0.714,
"_file_size": 233.087,
"_docs": """
These weights reproduce closely the results of the paper using a simplified training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", AlexNet_Weights.IMAGENET1K_V1))
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
"""AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
.. note::
AlexNet was originally introduced in the `ImageNet Classification with
Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html>`__
paper. Our implementation is based instead on the "One weird trick"
paper above.
Args:
weights (:class:`~torchvision.models.AlexNet_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.AlexNet_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.alexnet.AlexNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/alexnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.AlexNet_Weights
:members:
"""
weights = AlexNet_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = AlexNet(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs
model_urls = _ModelURLs(
{
"alexnet": AlexNet_Weights.IMAGENET1K_V1.url,
}
)
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
    When caching (``streaming=False``), cache materialization is parallelized over Spark; an NFS that is
    accessible to the driver must be provided. With ``streaming=True``, the DataFrame is read lazily as an
    iterable dataset instead of being cached.
"""
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
streaming: bool = True,
cache_dir: str = None,
keep_in_memory: bool = False,
working_dir: str = None,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
working_dir=working_dir,
**kwargs,
)
def read(self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split)
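# Usage sketch: an illustrative way to drive the reader, assuming a local SparkSession
# and the Spark packaged module are available in the environment. The toy DataFrame
# below (columns `text`, `label`) is a made-up example.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([("a", 1), ("b", 2)], schema="text string, label int")
    # streaming=False materializes the cache via download_and_prepare() and returns a
    # map-style Dataset; streaming=True would return an iterable dataset without caching.
    ds = SparkDatasetReader(df, streaming=False).read()
    print(ds)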
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
    When caching (``streaming=False``), cache materialization is parallelized over Spark; an NFS that is
    accessible to the driver must be provided. With ``streaming=True``, the DataFrame is read lazily as an
    iterable dataset instead of being cached.
"""
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
streaming: bool = True,
cache_dir: str = None,
keep_in_memory: bool = False,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
**kwargs,
)
def read(self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split)
|
from typing import Any
import pytest
from langchain_tests.conftest import CustomPersister, CustomSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from vcr import VCR # type: ignore[import-untyped]
def remove_request_headers(request: Any) -> Any:
for k in request.headers:
request.headers[k] = "**REDACTED**"
return request
def remove_response_headers(response: dict) -> dict:
for k in response["headers"]:
response["headers"][k] = "**REDACTED**"
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""
Extend the default configuration coming from langchain_tests.
"""
config = _base_vcr_config.copy()
config["before_record_request"] = remove_request_headers
config["before_record_response"] = remove_response_headers
config["serializer"] = "yaml.gz"
config["path_transformer"] = VCR.ensure_suffix(".yaml.gz")
return config
def pytest_recording_configure(config: dict, vcr: VCR) -> None:
vcr.register_persister(CustomPersister())
vcr.register_serializer("yaml.gz", CustomSerializer())
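# Usage note (illustrative): with pytest-recording, tests decorated with
# `@pytest.mark.vcr` pick up this configuration, so new cassettes are written with the
# custom `yaml.gz` serializer (presumably gzip-compressed YAML) and all request and
# response headers redacted.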
|
from typing import Any
import pytest
from langchain_tests.conftest import YamlGzipSerializer
from langchain_tests.conftest import _base_vcr_config as _base_vcr_config
from vcr import VCR # type: ignore[import-untyped]
def remove_request_headers(request: Any) -> Any:
for k in request.headers:
request.headers[k] = "**REDACTED**"
return request
def remove_response_headers(response: dict) -> dict:
for k in response["headers"]:
response["headers"][k] = "**REDACTED**"
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""
Extend the default configuration coming from langchain_tests.
"""
config = _base_vcr_config.copy()
config["before_record_request"] = remove_request_headers
config["before_record_response"] = remove_response_headers
config["serializer"] = "yaml.gz"
config["path_transformer"] = VCR.ensure_suffix(".yaml.gz")
return config
@pytest.fixture
def vcr(vcr_config: dict) -> VCR:
"""Override the default vcr fixture to include custom serializers"""
my_vcr = VCR(**vcr_config)
my_vcr.register_serializer("yaml.gz", YamlGzipSerializer)
return my_vcr
|
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = "convert_color_space(..., color_space=features.ColorSpace.GRAY)"
if num_output_channels == 3:
replacement = f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB)"
warnings.warn(
f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
def rgb_to_grayscale(
inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1
) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]:
if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
inpt = inpt.as_subclass(torch.Tensor)
old_color_space = None
elif isinstance(inpt, torch.Tensor):
old_color_space = features._image._from_tensor_shape(inpt.shape) # type: ignore[arg-type]
else:
old_color_space = None
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = (
f"convert_color_space(..., color_space=features.ColorSpace.GRAY"
f"{f', old_color_space=features.ColorSpace.{old_color_space}' if old_color_space is not None else ''})"
)
if num_output_channels == 3:
replacement = (
f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB"
f"{f', old_color_space=features.ColorSpace.GRAY' if old_color_space is not None else ''})"
)
warnings.warn(
f"The function `rgb_to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.rgb_to_grayscale(inpt, num_output_channels=num_output_channels)
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import warnings
from typing import Any, List, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = "convert_color_space(..., color_space=features.ColorSpace.GRAY)"
if num_output_channels == 3:
replacement = f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB)"
warnings.warn(
f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
def rgb_to_grayscale(
inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1
) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]:
old_color_space = (
features._image._from_tensor_shape(inpt.shape) # type: ignore[arg-type]
if isinstance(inpt, torch.Tensor)
and (torch.jit.is_scripting() or not isinstance(inpt, (features.Image, features.Video)))
else None
)
call = ", num_output_channels=3" if num_output_channels == 3 else ""
replacement = (
f"convert_color_space(..., color_space=features.ColorSpace.GRAY"
f"{f', old_color_space=features.ColorSpace.{old_color_space}' if old_color_space is not None else ''})"
)
if num_output_channels == 3:
replacement = (
f"convert_color_space({replacement}, color_space=features.ColorSpace.RGB"
f"{f', old_color_space=features.ColorSpace.GRAY' if old_color_space is not None else ''})"
)
warnings.warn(
f"The function `rgb_to_grayscale(...{call})` is deprecated in will be removed in a future release. "
f"Instead, please use `{replacement}`.",
)
return _F.rgb_to_grayscale(inpt, num_output_channels=num_output_channels)
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import NASFCOSHead
class TestNASFCOSHead(TestCase):
def test_nasfcos_head_loss(self):
"""Tests nasfcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
nasfcos_head = NASFCOSHead(
num_classes=4,
in_channels=2, # the same as `deform_groups` in dconv3x3_config
feat_channels=2,
norm_cfg=None)
        # NASFCOS head expects multiple levels of features per image
feats = (
torch.rand(1, 2, s // stride[1], s // stride[0]).float()
for stride in nasfcos_head.prior_generator.strides)
cls_scores, bbox_preds, centernesses = nasfcos_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = nasfcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss and centerness loss should be zero
empty_cls_loss = empty_gt_losses['loss_cls'].item()
empty_box_loss = empty_gt_losses['loss_bbox'].item()
empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_loss, 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_ctr_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = nasfcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].item()
onegt_box_loss = one_gt_losses['loss_bbox'].item()
onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(onegt_ctr_loss, 0,
'centerness loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import NASFCOSHead
class TestNASFCOSHead(TestCase):
def test_nasfcos_head_loss(self):
"""Tests nasfcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
nasfcos_head = NASFCOSHead(
num_classes=4,
in_channels=2, # the same as `deform_groups` in dconv3x3_config
feat_channels=2,
norm_cfg=None)
        # NASFCOS head expects multiple levels of features per image
feats = (
torch.rand(1, 2, s // stride[1], s // stride[0]).float()
for stride in nasfcos_head.prior_generator.strides)
cls_scores, bbox_preds, centernesses = nasfcos_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = nasfcos_head.loss(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss and centerness loss should be zero
empty_cls_loss = empty_gt_losses['loss_cls'].item()
empty_box_loss = empty_gt_losses['loss_bbox'].item()
empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_loss, 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_ctr_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = nasfcos_head.loss(cls_scores, bbox_preds, centernesses,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].item()
onegt_box_loss = one_gt_losses['loss_bbox'].item()
onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(onegt_ctr_loss, 0,
'centerness loss should be non-zero')
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
OAuth2Credentials,
)
from backend.util.settings import Secrets
secrets = Secrets()
GITHUB_OAUTH_IS_CONFIGURED = bool(
secrets.github_client_id and secrets.github_client_secret
)
GithubCredentials = APIKeyCredentials | OAuth2Credentials
GithubCredentialsInput = CredentialsMetaInput[
Literal["github"],
Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]
def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
"""
Creates a GitHub credentials input on a block.
Params:
scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
""" # noqa
return CredentialsField(
provider="github",
supported_credential_types=(
{"api_key", "oauth2"} if GITHUB_OAUTH_IS_CONFIGURED else {"api_key"}
),
required_scopes={scope},
description="The GitHub integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
api_key=SecretStr("mock-github-api-key"),
title="Mock GitHub API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
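# Usage sketch: an illustrative call to the helper above. The "repo" scope is an
# arbitrary example value; in practice the returned field is attached to a block's
# input schema so the user can supply either an API key or, if configured, OAuth2
# credentials.
if __name__ == "__main__":
    credentials_field = GithubCredentialsField("repo")
    print(credentials_field)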
|
from typing import Literal
from autogpt_libs.supabase_integration_credentials_store.types import (
APIKeyCredentials,
OAuth2Credentials,
)
from pydantic import SecretStr
from backend.data.model import CredentialsField, CredentialsMetaInput
from backend.util.settings import Secrets
secrets = Secrets()
GITHUB_OAUTH_IS_CONFIGURED = bool(
secrets.github_client_id and secrets.github_client_secret
)
GithubCredentials = APIKeyCredentials | OAuth2Credentials
GithubCredentialsInput = CredentialsMetaInput[
Literal["github"],
Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]
def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
"""
Creates a GitHub credentials input on a block.
Params:
scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
""" # noqa
return CredentialsField(
provider="github",
supported_credential_types=(
{"api_key", "oauth2"} if GITHUB_OAUTH_IS_CONFIGURED else {"api_key"}
),
required_scopes={scope},
description="The GitHub integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
api_key=SecretStr("mock-github-api-key"),
title="Mock GitHub API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.registry import MODELS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@MODELS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
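# Illustration: a standalone, torch-only toy that mirrors the `_lay_masks` heuristic
# above. Instances are pasted in descending score order, and an instance is skipped
# when more than `overlap_thr` of its area is already covered by previously pasted
# masks. The tensors are made-up toy values; this is a sketch of the idea, not a call
# into the head itself.
def _toy_lay_masks_demo():
    masks = torch.zeros((2, 4, 4), dtype=torch.bool)
    masks[0, :2, :] = True   # higher-scoring instance covers the top two rows
    masks[1, 1:3, :] = True  # overlaps the first mask on one of its two rows
    scores = torch.tensor([0.9, 0.8])
    overlap_thr = 0.5
    id_map = torch.zeros((4, 4), dtype=torch.long)
    instance_id = 1
    for idx in torch.argsort(-scores):
        mask = masks[idx]
        pasted = id_map > 0
        if (mask & pasted).sum() / (mask.sum() + 1e-5) > overlap_thr:
            continue  # mostly covered already -> dropped, as in `_lay_masks`
        id_map[mask & ~pasted] = instance_id
        instance_id += 1
    return id_map  # rows 0-1 -> id 1, row 2 -> id 2, row 3 -> background (0)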
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
bboxes: The bboxes results, (K, 4).
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
det_bboxes: The bboxes results, (K, 4).
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
seg_preds: The semantic segmentation results,
(K, num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
|
import numpy as np
import pytest
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import NdArray
@pytest.mark.proto
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf())
assert (tensor == original_ndarray).all()
@pytest.mark.proto
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
ndarray = NdArray._docarray_from_native(np.zeros((3, 224, 224)))
nd_proto = ndarray.to_protobuf()
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
import numpy as np
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import NdArray
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf())
assert (tensor == original_ndarray).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
ndarray = NdArray._docarray_from_native(np.zeros((3, 224, 224)))
nd_proto = ndarray.to_protobuf()
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='LegacyDeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
bbox_head=dict(
type='SSDHead',
anchor_generator=dict(
type='LegacySSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='LegacyDeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=3,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(_delete_=True)
dist_params = dict(backend='nccl', port=29555)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
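# Illustration of the linear scaling rule that `auto_scale_lr` enables (the GPU and
# per-GPU sample counts below are arbitrary examples):
#   scaled_lr = optimizer['lr'] * (num_gpus * samples_per_gpu) / base_batch_size
#   e.g. 4 GPUs x 8 samples per GPU: 2e-3 * 32 / 64 = 1e-3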
|
import pytest
from jina.enums import ProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
('http', [ProtocolType.HTTP]),
(['GRPC'], [ProtocolType.GRPC]),
(['grpc', 'http'], [ProtocolType.GRPC, ProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_kwargs(
port, protocol, expected_port, expected_protocol
):
args = ArgNamespace.kwargs2namespace(
{'port': port, 'protocol': protocol}, set_gateway_parser()
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
(['http'], [ProtocolType.HTTP]),
(['GRPC'], [ProtocolType.GRPC]),
(['grpc', 'http'], [ProtocolType.GRPC, ProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_args_list(
port, protocol, expected_port, expected_protocol
):
args = set_gateway_parser().parse_args(
['--port'] + port + ['--protocol'] + protocol
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
def test_pod_port_cast(port, expected_port):
args = set_pod_parser().parse_args(['--port'] + port)
assert args.port == expected_port
def test_pod_port_default():
args = set_pod_parser().parse_args([])
assert isinstance(args.port, list)
assert len(args.port) == 1
@pytest.mark.parametrize(
'host,expected_host',
[
(['localhost'], ['localhost']),
(['0.0.0.0,localhost'], ['0.0.0.0', 'localhost']),
(['0.0.0.0,localhost', '127.0.0.1'], ['0.0.0.0', 'localhost', '127.0.0.1']),
],
)
def test_pod_host_cast(host, expected_host):
args = set_pod_parser().parse_args(['--host'] + host)
assert args.host == expected_host
def test_pod_host_default():
from jina.constants import __default_host__
args = set_pod_parser().parse_args([])
assert args.host == [__default_host__]
def test_default_port_protocol_gateway():
args = set_gateway_parser().parse_args([])
assert len(args.port) == 1
assert args.protocol == [ProtocolType.GRPC]
def test_get_non_defaults_args():
args = set_gateway_parser().parse_args(
[
'--port',
'12345',
'12344',
'--protocol',
'grpc',
'--uses',
'MyCustomGateway',
'--uses-with',
'{"arg":"value"}',
]
)
non_defaults = ArgNamespace.get_non_defaults_args(
args,
set_gateway_parser(),
)
assert non_defaults['port'] == [12345, 12344]
assert 'protocol' not in non_defaults
assert non_defaults['uses'] == 'MyCustomGateway'
assert non_defaults['uses_with'] == {'arg': 'value'}
|
import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
('http', [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_kwargs(
port, protocol, expected_port, expected_protocol
):
args = ArgNamespace.kwargs2namespace(
{'port': port, 'protocol': protocol}, set_gateway_parser()
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
(['http'], [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_args_list(
port, protocol, expected_port, expected_protocol
):
args = set_gateway_parser().parse_args(
['--port'] + port + ['--protocol'] + protocol
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
def test_pod_port_cast(port, expected_port):
args = set_pod_parser().parse_args(['--port'] + port)
assert args.port == expected_port
def test_pod_port_default():
args = set_pod_parser().parse_args([])
assert isinstance(args.port, list)
assert len(args.port) == 1
@pytest.mark.parametrize(
'host,expected_host',
[
(['localhost'], ['localhost']),
(['0.0.0.0,localhost'], ['0.0.0.0', 'localhost']),
(['0.0.0.0,localhost', '127.0.0.1'], ['0.0.0.0', 'localhost', '127.0.0.1']),
],
)
def test_pod_host_cast(host, expected_host):
args = set_pod_parser().parse_args(['--host'] + host)
assert args.host == expected_host
def test_pod_host_default():
from jina.constants import __default_host__
args = set_pod_parser().parse_args([])
assert args.host == [__default_host__]
def test_default_port_protocol_gateway():
args = set_gateway_parser().parse_args([])
assert len(args.port) == 1
assert args.protocol == [GatewayProtocolType.GRPC]
def test_get_non_defaults_args():
args = set_gateway_parser().parse_args(
[
'--port',
'12345',
'12344',
'--protocol',
'grpc',
'--uses',
'MyCustomGateway',
'--uses-with',
'{"arg":"value"}',
]
)
non_defaults = ArgNamespace.get_non_defaults_args(
args,
set_gateway_parser(),
)
assert non_defaults['port'] == [12345, 12344]
assert 'protocol' not in non_defaults
assert non_defaults['uses'] == 'MyCustomGateway'
assert non_defaults['uses_with'] == {'arg': 'value'}
|
import re
from typing import Any, Optional
from langchain_text_splitters import RecursiveCharacterTextSplitter
class JSFrameworkTextSplitter(RecursiveCharacterTextSplitter):
"""Text splitter that handles React (JSX), Vue, and Svelte code.
This splitter extends RecursiveCharacterTextSplitter to handle
React (JSX), Vue, and Svelte code by:
1. Detecting and extracting custom component tags from the text
2. Using those tags as additional separators along with standard JS syntax
The splitter combines:
- Custom component tags as separators (e.g. <Component, <div)
- JavaScript syntax elements (function, const, if, etc)
- Standard text splitting on newlines
This allows chunks to break at natural boundaries in
React, Vue, and Svelte component code.
"""
def __init__(
self,
separators: Optional[list[str]] = None,
chunk_size: int = 2000,
chunk_overlap: int = 0,
**kwargs: Any,
) -> None:
"""Initialize the JS Framework text splitter.
Args:
separators: Optional list of custom separator strings to use
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
**kwargs: Additional arguments to pass to parent class
"""
super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)
self._separators = separators or []
def split_text(self, text: str) -> list[str]:
"""Split text into chunks.
This method splits the text into chunks by:
- Extracting unique opening component tags using regex
- Creating separators list with extracted tags and JS separators
- Splitting the text using the separators by calling the parent class method
Args:
text: String containing code to split
Returns:
List of text chunks split on component and JS boundaries
"""
# Extract unique opening component tags using regex
# Regex to match opening tags, excluding self-closing tags
opening_tags = re.findall(r"<\s*([a-zA-Z0-9]+)[^>]*>", text)
component_tags = []
for tag in opening_tags:
if tag not in component_tags:
component_tags.append(tag)
component_separators = [f"<{tag}" for tag in component_tags]
js_separators = [
"\nexport ",
" export ",
"\nfunction ",
"\nasync function ",
" async function ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
" class ",
"\nif ",
" if ",
"\nfor ",
" for ",
"\nwhile ",
" while ",
"\nswitch ",
" switch ",
"\ncase ",
" case ",
"\ndefault ",
" default ",
]
separators = (
self._separators
+ js_separators
+ component_separators
+ ["<>", "\n\n", "&&\n", "||\n"]
)
self._separators = separators
return super().split_text(text)
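# Usage sketch: split a small, made-up JSX-like component. The chunk size is
# deliberately tiny so the split is visible; detected tags such as "<div" and
# "<button" become additional separators alongside the JS keywords.
if __name__ == "__main__":
    sample = (
        "export default function App() {\n"
        "  const [count, setCount] = useState(0)\n"
        "  return (\n"
        "    <div>\n"
        "      <button onClick={() => setCount(count + 1)}>+</button>\n"
        "    </div>\n"
        "  )\n"
        "}\n"
    )
    splitter = JSFrameworkTextSplitter(chunk_size=80, chunk_overlap=0)
    for chunk in splitter.split_text(sample):
        print(repr(chunk))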
|
import re
from typing import Any, Optional
from langchain_text_splitters import RecursiveCharacterTextSplitter
class JSFrameworkTextSplitter(RecursiveCharacterTextSplitter):
"""Text splitter that handles React (JSX), Vue, and Svelte code.
This splitter extends RecursiveCharacterTextSplitter to handle
React (JSX), Vue, and Svelte code by:
1. Detecting and extracting custom component tags from the text
2. Using those tags as additional separators along with standard JS syntax
The splitter combines:
- Custom component tags as separators (e.g. <Component, <div)
- JavaScript syntax elements (function, const, if, etc)
- Standard text splitting on newlines
This allows chunks to break at natural boundaries in
React, Vue, and Svelte component code.
"""
def __init__(
self,
separators: Optional[list[str]] = None,
chunk_size: int = 2000,
chunk_overlap: int = 0,
**kwargs: Any,
) -> None:
"""Initialize the JS Framework text splitter.
Args:
separators: Optional list of custom separator strings to use
chunk_size: Maximum size of chunks to return
chunk_overlap: Overlap in characters between chunks
**kwargs: Additional arguments to pass to parent class
"""
super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs)
self._separators = separators or []
def split_text(self, text: str) -> list[str]:
"""Split text into chunks.
This method splits the text into chunks by:
- Extracting unique opening component tags using regex
- Creating separators list with extracted tags and JS separators
- Splitting the text using the separators by calling the parent class method
Args:
text: String containing code to split
Returns:
List of text chunks split on component and JS boundaries
"""
# Extract unique opening component tags using regex
# Regex to match opening tags, excluding self-closing tags
opening_tags = re.findall(r"<\s*([a-zA-Z0-9]+)[^>]*>", text)
component_tags = []
for tag in opening_tags:
if tag not in component_tags:
component_tags.append(tag)
component_separators = [f"<{tag}" for tag in component_tags]
js_separators = [
"\nexport ",
" export ",
"\nfunction ",
"\nasync function ",
" async function ",
"\nconst ",
"\nlet ",
"\nvar ",
"\nclass ",
" class ",
"\nif ",
" if ",
"\nfor ",
" for ",
"\nwhile ",
" while ",
"\nswitch ",
" switch ",
"\ncase ",
" case ",
"\ndefault ",
" default ",
]
separators = (
self._separators
+ js_separators
+ component_separators
+ ["<>", "\n\n", "&&\n", "||\n"]
)
self._separators = separators
chunks = super().split_text(text)
return chunks
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class NASFCOS(SingleStageDetector):
"""NAS-FCOS: Fast Neural Architecture Search for Object Detection.
    https://arxiv.org/abs/1906.04423
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
"""NAS-FCOS: Fast Neural Architecture Search for Object Detection.
    https://arxiv.org/abs/1906.04423
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
import types
from typing import Any
import torch._C
class _ClassNamespace(types.ModuleType):
def __init__(self, name: str) -> None:
super().__init__("torch.classes" + name)
self.name = name
def __getattr__(self, attr: str) -> Any:
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
if proxy is None:
raise RuntimeError(f"Class {self.name}.{attr} not registered!")
return proxy
class _Classes(types.ModuleType):
__file__ = "_classes.py"
def __init__(self) -> None:
super().__init__("torch.classes")
def __getattr__(self, name: str) -> _ClassNamespace:
namespace = _ClassNamespace(name)
setattr(self, name, namespace)
return namespace
@property
def loaded_libraries(self) -> Any:
return torch.ops.loaded_libraries
def load_library(self, path: str) -> None:
"""
Loads a shared library from the given path into the current process.
The library being loaded may run global initialization code to register
custom classes with the PyTorch JIT runtime. This allows dynamically
loading custom classes. For this, you should compile your class
and the static registration code into a shared library object, and then
call ``torch.classes.load_library('path/to/libcustom.so')`` to load the
shared object.
After the library is loaded, it is added to the
``torch.classes.loaded_libraries`` attribute, a set that may be inspected
for the paths of all libraries loaded using this function.
Args:
path (str): A path to a shared library to load.
"""
torch.ops.load_library(path)
# The classes "namespace"
classes = _Classes()
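# Usage sketch (illustrative addition, not part of the original module). The path below
# is a placeholder for a shared library that registers custom classes via TorchBind:
#
#   import torch
#   torch.classes.load_library("build/libcustom_classes.so")  # hypothetical path
#   print(torch.classes.loaded_libraries)
#   # Registered classes are then reachable as attributes, e.g.
#   # torch.classes.my_namespace.MyStackClass (namespace/class names are assumptions).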
|
# mypy: allow-untyped-defs
import types
import torch._C
class _ClassNamespace(types.ModuleType):
def __init__(self, name):
super().__init__("torch.classes" + name)
self.name = name
def __getattr__(self, attr):
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
if proxy is None:
raise RuntimeError(f"Class {self.name}.{attr} not registered!")
return proxy
class _Classes(types.ModuleType):
__file__ = "_classes.py"
def __init__(self) -> None:
super().__init__("torch.classes")
def __getattr__(self, name):
namespace = _ClassNamespace(name)
setattr(self, name, namespace)
return namespace
@property
def loaded_libraries(self):
return torch.ops.loaded_libraries
def load_library(self, path):
"""
Loads a shared library from the given path into the current process.
The library being loaded may run global initialization code to register
custom classes with the PyTorch JIT runtime. This allows dynamically
loading custom classes. For this, you should compile your class
and the static registration code into a shared library object, and then
call ``torch.classes.load_library('path/to/libcustom.so')`` to load the
shared object.
After the library is loaded, it is added to the
``torch.classes.loaded_libraries`` attribute, a set that may be inspected
for the paths of all libraries loaded using this function.
Args:
path (str): A path to a shared library to load.
"""
torch.ops.load_library(path)
# The classes "namespace"
classes = _Classes()
|
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
else:
import transforms
return transforms, None, None
class SegmentationPresetTrain:
def __init__(
self,
*,
base_size,
crop_size,
hflip_prob=0.5,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
backend="pil",
use_v2=False,
):
T, datapoints, v2_extras = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImage())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]
if hflip_prob > 0:
transforms += [T.RandomHorizontalFlip(hflip_prob)]
if use_v2:
# We need a custom pad transform here, since the padding we want to perform here is fundamentally
# different from the padding in `RandomCrop` if `pad_if_needed=True`.
transforms += [v2_extras.PadIfSmaller(crop_size, fill={datapoints.Mask: 255, "others": 0})]
transforms += [T.RandomCrop(crop_size)]
if backend == "pil":
transforms += [T.PILToTensor()]
if use_v2:
img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
transforms += [
T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
]
else:
# No need to explicitly convert masks as they're magically int64 already
transforms += [T.ConvertImageDtype(torch.float)]
transforms += [T.Normalize(mean=mean, std=std)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class SegmentationPresetEval:
def __init__(
self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
):
T, _, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImage()]
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if use_v2:
transforms += [T.Resize(size=(base_size, base_size))]
else:
transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImage() if use_v2 else T.PILToTensor()]
transforms += [
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
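# Usage sketch (illustrative addition, not part of the original file). With the local
# `transforms` module available, the presets above could be applied to a PIL image/mask
# pair like this (variable names are placeholders):
#
#   train_tf = SegmentationPresetTrain(base_size=520, crop_size=480)
#   eval_tf = SegmentationPresetEval(base_size=520)
#   img_t, target_t = train_tf(pil_image, pil_mask)
#   img_e, target_e = eval_tf(pil_image, pil_mask)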
|
import torch
def get_modules(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.datapoints
import torchvision.transforms.v2
import v2_extras
return torchvision.transforms.v2, torchvision.datapoints, v2_extras
else:
import transforms
return transforms, None, None
class SegmentationPresetTrain:
def __init__(
self,
*,
base_size,
crop_size,
hflip_prob=0.5,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
backend="pil",
use_v2=False,
):
T, datapoints, v2_extras = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "datapoint":
transforms.append(T.ToImageTensor())
elif backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
transforms += [T.RandomResize(min_size=int(0.5 * base_size), max_size=int(2.0 * base_size))]
if hflip_prob > 0:
transforms += [T.RandomHorizontalFlip(hflip_prob)]
if use_v2:
# We need a custom pad transform here, since the padding we want to perform here is fundamentally
# different from the padding in `RandomCrop` if `pad_if_needed=True`.
transforms += [v2_extras.PadIfSmaller(crop_size, fill={datapoints.Mask: 255, "others": 0})]
transforms += [T.RandomCrop(crop_size)]
if backend == "pil":
transforms += [T.PILToTensor()]
if use_v2:
img_type = datapoints.Image if backend == "datapoint" else torch.Tensor
transforms += [
T.ToDtype(dtype={img_type: torch.float32, datapoints.Mask: torch.int64, "others": None}, scale=True)
]
else:
# No need to explicitly convert masks as they're magically int64 already
transforms += [T.ConvertImageDtype(torch.float)]
transforms += [T.Normalize(mean=mean, std=std)]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
class SegmentationPresetEval:
def __init__(
self, *, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), backend="pil", use_v2=False
):
T, _, _ = get_modules(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms += [T.PILToTensor()]
elif backend == "datapoint":
transforms += [T.ToImageTensor()]
elif backend != "pil":
raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
if use_v2:
transforms += [T.Resize(size=(base_size, base_size))]
else:
transforms += [T.RandomResize(min_size=base_size, max_size=base_size)]
if backend == "pil":
# Note: we could just convert to pure tensors even in v2?
transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
transforms += [
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
self.transforms = T.Compose(transforms)
def __call__(self, img, target):
return self.transforms(img, target)
|
import os
from jina import Flow, DocumentArray
cur_dir = os.path.dirname(__file__)
def test_install_reqs():
f = Flow().add(
install_requirements=True,
uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'),
)
with f:
resp = f.post(on='/', inputs=DocumentArray.empty(2))
assert len(resp) == 2
|
import os
from jina import Flow, DocumentArray
cur_dir = os.path.dirname(__file__)
def test_install_reqs():
f = Flow().add(install_requirements=True, uses=os.path.join(os.path.join(cur_dir, 'exec'), 'config.yml'))
with f:
resp = f.post(on='/', inputs=DocumentArray.empty(2))
assert len(resp) == 2
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
distance2bbox, find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
distance2bbox, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
'bbox_xyxy_to_cxcywh', 'RegionAssigner'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.constraints import deserialize as deserialize
from keras.src.constraints import get as get
from keras.src.constraints import serialize as serialize
from keras.src.constraints.constraints import Constraint as Constraint
from keras.src.constraints.constraints import MaxNorm as MaxNorm
from keras.src.constraints.constraints import MaxNorm as max_norm
from keras.src.constraints.constraints import MinMaxNorm as MinMaxNorm
from keras.src.constraints.constraints import MinMaxNorm as min_max_norm
from keras.src.constraints.constraints import NonNeg as NonNeg
from keras.src.constraints.constraints import NonNeg as non_neg
from keras.src.constraints.constraints import UnitNorm as UnitNorm
from keras.src.constraints.constraints import UnitNorm as unit_norm
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.constraints import deserialize
from keras.src.constraints import get
from keras.src.constraints import serialize
from keras.src.constraints.constraints import Constraint
from keras.src.constraints.constraints import MaxNorm
from keras.src.constraints.constraints import MaxNorm as max_norm
from keras.src.constraints.constraints import MinMaxNorm
from keras.src.constraints.constraints import MinMaxNorm as min_max_norm
from keras.src.constraints.constraints import NonNeg
from keras.src.constraints.constraints import NonNeg as non_neg
from keras.src.constraints.constraints import UnitNorm
from keras.src.constraints.constraints import UnitNorm as unit_norm
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an AnyEmbedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can extend this Document:
.. code-block:: python
from docarray import Text
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument, Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import Embedding
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an Embedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can extend this Document:
.. code-block:: python
from docarray import Text
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[Embedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument, Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[Embedding] = None
|
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py']
# yapf:disable
model = dict(
bbox_head=dict(
anchor_generator=dict(
base_sizes=[[(220, 125), (128, 222), (264, 266)],
[(35, 87), (102, 96), (60, 170)],
[(10, 15), (24, 36), (72, 42)]])))
# yapf:enable
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py']
# yapf:disable
model = dict(
bbox_head=dict(
anchor_generator=dict(
base_sizes=[[(220, 125), (128, 222), (264, 266)],
[(35, 87), (102, 96), (60, 170)],
[(10, 15), (24, 36), (72, 42)]])))
# yapf:enable
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(320, 320), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
tensors = url.load()
assert isinstance(tensors.vertices, np.ndarray)
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, np.ndarray)
assert isinstance(tensors.faces, NdArray)
assert tensors.vertices.shape[1] == 3
assert tensors.faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', ['vertices', 'faces'])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
@pytest.mark.parametrize(
'file_type, file_source',
[
(OBJ_MIMETYPE, MESH_FILES['obj']),
(OBJ_MIMETYPE, MESH_FILES['glb']),
(OBJ_MIMETYPE, MESH_FILES['ply']),
(OBJ_MIMETYPE, REMOTE_OBJ_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(VIDEO_MIMETYPE, os.path.join(TOYDATA_DIR, 'mov_bbb.mp4')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.html')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
],
)
def test_file_validation(file_type, file_source):
if file_type != Mesh3DUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(Mesh3DUrl, file_source)
else:
parse_obj_as(Mesh3DUrl, file_source)
|
import os
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
tensors = url.load()
assert isinstance(tensors.vertices, np.ndarray)
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, np.ndarray)
assert isinstance(tensors.faces, NdArray)
assert tensors.vertices.shape[1] == 3
assert tensors.faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', ['vertices', 'faces'])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
@pytest.mark.parametrize(
'file_type, file_source',
[
(OBJ_MIMETYPE, MESH_FILES['obj']),
(OBJ_MIMETYPE, MESH_FILES['glb']),
(OBJ_MIMETYPE, MESH_FILES['ply']),
(OBJ_MIMETYPE, REMOTE_OBJ_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(VIDEO_MIMETYPE, os.path.join(TOYDATA_DIR, 'mov_bbb.mp4')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.html')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
],
)
def test_file_validation(file_type, file_source):
if file_type != Mesh3DUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(Mesh3DUrl, file_source)
else:
parse_obj_as(Mesh3DUrl, file_source)
|
"""Output classes.
**Output** classes are used to represent the output of a language model call
and the output of a chat.
The top container for information is the `LLMResult` object. `LLMResult` is used by
both chat models and LLMs. This object contains the output of the language
model and any additional information that the model provider wants to return.
When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
- Chat models will return `AIMessage` objects.
- LLMs will return regular text strings.
In addition, users can access the raw output of either LLMs or chat models via
callbacks. The on_chat_model_end and on_llm_end callbacks will return an
LLMResult object containing the generated outputs and any additional information
returned by the model provider.
In general, if information is already available
in the AIMessage object, it is recommended to access it from there rather than
from the `LLMResult` object.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.outputs.chat_generation import (
ChatGeneration,
ChatGenerationChunk,
)
from langchain_core.outputs.chat_result import ChatResult
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.llm_result import LLMResult
from langchain_core.outputs.run_info import RunInfo
__all__ = [
"ChatGeneration",
"ChatGenerationChunk",
"ChatResult",
"Generation",
"GenerationChunk",
"LLMResult",
"RunInfo",
]
_dynamic_imports = {
"ChatGeneration": "chat_generation",
"ChatGenerationChunk": "chat_generation",
"ChatResult": "chat_result",
"Generation": "generation",
"GenerationChunk": "generation",
"LLMResult": "llm_result",
"RunInfo": "run_info",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
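# Usage sketch (illustrative addition, not part of the original module): the nested
# `generations` layout described in the docstring above, built by hand.
#
#   from langchain_core.outputs import Generation, LLMResult
#   result = LLMResult(generations=[[Generation(text="hello")]])
#   result.generations[0][0].text  # -> "hello"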
|
"""Output classes.
**Output** classes are used to represent the output of a language model call
and the output of a chat.
The top container for information is the `LLMResult` object. `LLMResult` is used by
both chat models and LLMs. This object contains the output of the language
model and any additional information that the model provider wants to return.
When invoking models via the standard runnable methods (e.g. invoke, batch, etc.):
- Chat models will return `AIMessage` objects.
- LLMs will return regular text strings.
In addition, users can access the raw output of either LLMs or chat models via
callbacks. The on_chat_model_end and on_llm_end callbacks will return an
LLMResult object containing the generated outputs and any additional information
returned by the model provider.
In general, if information is already available
in the AIMessage object, it is recommended to access it from there rather than
from the `LLMResult` object.
"""
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.chat_result import ChatResult
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.llm_result import LLMResult
from langchain_core.outputs.run_info import RunInfo
__all__ = [
"ChatGeneration",
"ChatGenerationChunk",
"ChatResult",
"Generation",
"GenerationChunk",
"LLMResult",
"RunInfo",
]
|
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from llama_index.core.llms import ChatMessage, LLM
from llama_index.core.memory import BaseMemory
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType, PromptDictType
from llama_index.core.tools import BaseTool, AsyncBaseTool, FunctionTool
from llama_index.core.workflow import Context
from llama_index.core.objects import ObjectRetriever
from llama_index.core.settings import Settings
def get_default_llm() -> LLM:
return Settings.llm
class BaseWorkflowAgent(BaseModel, PromptMixin, ABC):
"""Base class for all agents, combining config and logic."""
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str = Field(description="The name of the agent")
description: str = Field(
description="The description of what the agent does and is responsible for"
)
system_prompt: Optional[str] = Field(
default=None, description="The system prompt for the agent"
)
tools: Optional[List[BaseTool]] = Field(
default=None, description="The tools that the agent can use"
)
tool_retriever: Optional[ObjectRetriever] = Field(
default=None,
description="The tool retriever for the agent, can be provided instead of tools",
)
can_handoff_to: Optional[List[str]] = Field(
default=None, description="The agent names that this agent can hand off to"
)
llm: LLM = Field(
default_factory=get_default_llm, description="The LLM that the agent uses"
)
@field_validator("tools", mode="before")
def validate_tools(
cls, v: Optional[Sequence[Union[BaseTool, Callable]]]
) -> Optional[Sequence[BaseTool]]:
"""Validate tools.
If tools are not of type BaseTool, they will be converted to FunctionTools.
This assumes the inputs are tools or callable functions.
"""
if v is None:
return None
validated_tools: List[BaseTool] = []
for tool in v:
if not isinstance(tool, BaseTool):
validated_tools.append(FunctionTool.from_defaults(tool))
else:
validated_tools.append(tool)
for tool in validated_tools:
if tool.metadata.name == "handoff":
raise ValueError(
"'handoff' is a reserved tool name. Please use a different name."
)
return validated_tools # type: ignore[return-value]
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
@abstractmethod
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the agent."""
@abstractmethod
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results."""
@abstractmethod
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the agent's execution."""
|
from abc import ABC, abstractmethod
from typing import Callable, List, Sequence, Optional, Union
from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
ToolCallResult,
)
from llama_index.core.bridge.pydantic import (
BaseModel,
Field,
ConfigDict,
field_validator,
)
from llama_index.core.llms import ChatMessage, LLM
from llama_index.core.memory import BaseMemory
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType, PromptDictType
from llama_index.core.tools import BaseTool, AsyncBaseTool, FunctionTool
from llama_index.core.workflow import Context
from llama_index.core.objects import ObjectRetriever
from llama_index.core.settings import Settings
def get_default_llm() -> LLM:
return Settings.llm
class BaseWorkflowAgent(BaseModel, PromptMixin, ABC):
"""Base class for all agents, combining config and logic."""
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str = Field(description="The name of the agent")
description: str = Field(
description="The description of what the agent does and is responsible for"
)
system_prompt: Optional[str] = Field(
default=None, description="The system prompt for the agent"
)
tools: Optional[List[BaseTool]] = Field(
default=None, description="The tools that the agent can use"
)
tool_retriever: Optional[ObjectRetriever] = Field(
default=None,
description="The tool retriever for the agent, can be provided instead of tools",
)
can_handoff_to: Optional[List[str]] = Field(
default=None, description="The agent names that this agent can hand off to"
)
llm: LLM = Field(
default_factory=get_default_llm, description="The LLM that the agent uses"
)
@field_validator("tools", mode="before")
def validate_tools(
cls, v: Optional[Sequence[Union[BaseTool, Callable]]]
) -> Optional[Sequence[BaseTool]]:
"""Validate tools.
If tools are not of type BaseTool, they will be converted to FunctionTools.
This assumes the inputs are tools or callable functions.
"""
if v is None:
return None
validated_tools: List[BaseTool] = []
for tool in v:
if not isinstance(tool, BaseTool):
validated_tools.append(FunctionTool.from_defaults(tool))
else:
validated_tools.append(tool)
return validated_tools # type: ignore[return-value]
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
"""Update prompts."""
@abstractmethod
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the agent."""
@abstractmethod
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results."""
@abstractmethod
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the agent's execution."""
|
from docarray.typing.id import ID
from docarray.typing.tensor import Tensor, TorchTensor
from docarray.typing.tensor.embedding import Embedding
from docarray.typing.url import AnyUrl, ImageUrl
__all__ = [
'TorchTensor',
'Tensor',
'Embedding',
'ImageUrl',
'AnyUrl',
'ID',
]
|
from docarray.typing.embedding import Embedding
from docarray.typing.id import ID
from docarray.typing.tensor import Tensor, TorchTensor
from docarray.typing.url import AnyUrl, ImageUrl
__all__ = ['Tensor', 'Embedding', 'ImageUrl', 'AnyUrl', 'ID', 'TorchTensor']
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLOv2(SingleStageInstanceSegmentor):
"""`SOLOv2: Dynamic and Fast Instance Segmentation
<https://arxiv.org/abs/2003.10152>`_
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
mask_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLOv2(SingleStageInstanceSegmentor):
"""`SOLOv2: Dynamic and Fast Instance Segmentation
<https://arxiv.org/abs/2003.10152>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
|
import logging
from backend.data import integrations
from backend.data.model import Credentials
from ._base import WT, BaseWebhooksManager
logger = logging.getLogger(__name__)
class ManualWebhookManagerBase(BaseWebhooksManager[WT]):
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: WT,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
print(ingress_url) # FIXME: pass URL to user in front end
return "", {}
async def _deregister_webhook(
self,
webhook: integrations.Webhook,
credentials: Credentials,
) -> None:
pass
|
import logging
from backend.data import integrations
from backend.data.model import APIKeyCredentials, Credentials, OAuth2Credentials
from ._base import WT, BaseWebhooksManager
logger = logging.getLogger(__name__)
class ManualWebhookManagerBase(BaseWebhooksManager[WT]):
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: WT,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
print(ingress_url) # FIXME: pass URL to user in front end
return "", {}
async def _deregister_webhook(
self,
webhook: integrations.Webhook,
credentials: OAuth2Credentials | APIKeyCredentials,
) -> None:
pass
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmcv.runner import ModuleList, auto_fp16
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
the input feature map instead of upsample it.
Args:
num_convs (int): Number of conv layers in the head. Default: 0.
num_fcs (int): Number of fc layers in the head. Default: 2.
fc_out_channels (int): Number of output channels of fc layer.
Default: 1024.
downsample_factor (int): The factor that feature map is downsampled by.
Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=0,
num_fcs=2,
fc_out_channels=1024,
downsample_factor=2,
init_cfg=dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg):
super(CoarseMaskHead, self).__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self):
super(FCNMaskHead, self).init_weights()
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
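# Shape sketch (illustrative addition, not part of the original module). Following the
# code above: with roi_feat_size=(14, 14) and downsample_factor=2, output_size becomes
# (7, 7), so forward() returns mask_pred of shape (num_rois, num_classes, 7, 7) instead
# of the upsampled maps produced by the standard FCNMaskHead.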
|
from mmcv.cnn import ConvModule, Linear
from mmcv.runner import ModuleList, auto_fp16
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
the input feature map instead of upsample it.
Args:
num_convs (int): Number of conv layers in the head. Default: 0.
num_fcs (int): Number of fc layers in the head. Default: 2.
fc_out_channels (int): Number of output channels of fc layer.
Default: 1024.
downsample_factor (int): The factor that feature map is downsampled by.
Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=0,
num_fcs=2,
fc_out_channels=1024,
downsample_factor=2,
init_cfg=dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg):
super(CoarseMaskHead, self).__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self):
super(FCNMaskHead, self).init_weights()
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
import pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
NOT_DEVICE_TESTS = {
"test_tokenization",
"test_tokenization_mistral_common",
"test_processor",
"test_processing",
"test_beam_constraints",
"test_configuration_utils",
"test_data_collator",
"test_trainer_callback",
"test_trainer_utils",
"test_feature_extraction",
"test_image_processing",
"test_image_processor",
"test_image_transforms",
"test_optimization",
"test_retrieval",
"test_config",
"test_from_pretrained_no_checkpoint",
"test_keep_in_fp32_modules",
"test_gradient_checkpointing_backward_compatibility",
"test_gradient_checkpointing_enable_disable",
"test_torch_save_load",
"test_initialization",
"test_forward_signature",
"test_model_get_set_embeddings",
"test_model_main_input_name",
"test_correct_missing_keys",
"test_tie_model_weights",
"test_can_use_safetensors",
"test_load_save_without_tied_weights",
"test_tied_weights_keys",
"test_model_weights_reload_no_missing_tied_weights",
"test_mismatched_shapes_have_properly_initialized_weights",
"test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist",
"test_model_is_small",
"test_tf_from_pt_safetensors",
"test_flax_from_pt_safetensors",
"ModelTest::test_pipeline_", # None of the pipeline tests from PipelineTesterMixin (of which XxxModelTest inherits from) are running on device
"ModelTester::test_pipeline_",
"/repo_utils/",
"/utils/",
}
# allow having multiple repository checkouts and not needing to remember to rerun
# `pip install -e '.[dev]'` when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers", "not_device_test: mark the tests always running on cpu")
def pytest_collection_modifyitems(items):
for item in items:
if any(test_name in item.nodeid for test_name in NOT_DEVICE_TESTS):
item.add_marker(pytest.mark.not_device_test)
def pytest_addoption(parser):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
def check_output(self, want, got, optionflags):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
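# Hedged usage sketch (illustrative, not part of the original conftest): with
# CustomOutputChecker installed above, a doctest example can opt out of output
# comparison via the registered IGNORE_RESULT flag, e.g.:
#
#     >>> import uuid
#     >>> uuid.uuid4()  # doctest: +IGNORE_RESULT
#     UUID('00000000-0000-0000-0000-000000000000')
#
# The checker then accepts the example even though the printed UUID differs.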
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
import pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
NOT_DEVICE_TESTS = {
"test_tokenization",
"test_processor",
"test_processing",
"test_beam_constraints",
"test_configuration_utils",
"test_data_collator",
"test_trainer_callback",
"test_trainer_utils",
"test_feature_extraction",
"test_image_processing",
"test_image_processor",
"test_image_transforms",
"test_optimization",
"test_retrieval",
"test_config",
"test_from_pretrained_no_checkpoint",
"test_keep_in_fp32_modules",
"test_gradient_checkpointing_backward_compatibility",
"test_gradient_checkpointing_enable_disable",
"test_torch_save_load",
"test_initialization",
"test_forward_signature",
"test_model_get_set_embeddings",
"test_model_main_input_name",
"test_correct_missing_keys",
"test_tie_model_weights",
"test_can_use_safetensors",
"test_load_save_without_tied_weights",
"test_tied_weights_keys",
"test_model_weights_reload_no_missing_tied_weights",
"test_mismatched_shapes_have_properly_initialized_weights",
"test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist",
"test_model_is_small",
"test_tf_from_pt_safetensors",
"test_flax_from_pt_safetensors",
"ModelTest::test_pipeline_", # None of the pipeline tests from PipelineTesterMixin (of which XxxModelTest inherits from) are running on device
"ModelTester::test_pipeline_",
"/repo_utils/",
"/utils/",
}
# allow having multiple repository checkouts without needing to remember to rerun
# `pip install -e '.[dev]'` when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers", "not_device_test: mark the tests always running on cpu")
def pytest_collection_modifyitems(items):
for item in items:
if any(test_name in item.nodeid for test_name in NOT_DEVICE_TESTS):
item.add_marker(pytest.mark.not_device_test)
def pytest_addoption(parser):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
def check_output(self, want, got, optionflags):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
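# Hedged usage note (standard pytest marker filtering, not part of this file):
# since pytest_collection_modifyitems tags the tests listed in NOT_DEVICE_TESTS
# with the `not_device_test` marker, they can be selected or skipped from the
# command line, e.g.:
#
#     pytest -m "not_device_test" tests/          # run only the CPU-only tests
#     pytest -m "not not_device_test" tests/      # skip them on a device CI job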
|
"""Test OCI Generative AI embedding service."""
from unittest.mock import MagicMock
import pytest
from pytest import MonkeyPatch
from langchain_community.embeddings import OCIGenAIEmbeddings
class MockResponseDict(dict):
def __getattr__(self, val): # type: ignore[no-untyped-def]
return self[val]
@pytest.mark.requires("oci")
@pytest.mark.parametrize(
"test_model_id", ["cohere.embed-english-light-v3.0", "cohere.embed-english-v3.0"]
)
def test_embedding_call(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
"""Test valid call to OCI Generative AI embedding service."""
oci_gen_ai_client = MagicMock()
embeddings = OCIGenAIEmbeddings(
model_id=test_model_id,
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
client=oci_gen_ai_client,
)
def mocked_response(invocation_obj): # type: ignore[no-untyped-def]
docs = invocation_obj.inputs
embeddings = []
for d in docs:
if "Hello" in d:
v = [1.0, 0.0, 0.0]
elif "World" in d:
v = [0.0, 1.0, 0.0]
else:
v = [0.0, 0.0, 1.0]
embeddings.append(v)
return MockResponseDict(
{
"status": 200,
"data": MockResponseDict({"embeddings": embeddings}),
}
)
monkeypatch.setattr(embeddings.client, "embed_text", mocked_response)
output = embeddings.embed_documents(["Hello", "World"])
correct_output = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
assert output == correct_output
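# Hedged illustration (not part of the original test): MockResponseDict simply
# exposes dict keys as attributes, mimicking the shape of the OCI SDK response
# objects consumed by OCIGenAIEmbeddings, e.g.:
#
#     resp = MockResponseDict(
#         {"status": 200, "data": MockResponseDict({"embeddings": [[1.0, 0.0]]})}
#     )
#     assert resp.status == 200
#     assert resp.data.embeddings == [[1.0, 0.0]]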
|
"""Test OCI Generative AI embedding service."""
from unittest.mock import MagicMock
import pytest
from pytest import MonkeyPatch
from langchain_community.embeddings import OCIGenAIEmbeddings
class MockResponseDict(dict):
def __getattr__(self, val): # type: ignore[no-untyped-def]
return self[val]
@pytest.mark.requires("oci")
@pytest.mark.parametrize(
"test_model_id", ["cohere.embed-english-light-v3.0", "cohere.embed-english-v3.0"]
)
def test_embedding_call(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
"""Test valid call to OCI Generative AI embedding service."""
oci_gen_ai_client = MagicMock()
embeddings = OCIGenAIEmbeddings( # type: ignore[call-arg]
model_id=test_model_id,
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
client=oci_gen_ai_client,
)
def mocked_response(invocation_obj): # type: ignore[no-untyped-def]
docs = invocation_obj.inputs
embeddings = []
for d in docs:
if "Hello" in d:
v = [1.0, 0.0, 0.0]
elif "World" in d:
v = [0.0, 1.0, 0.0]
else:
v = [0.0, 0.0, 1.0]
embeddings.append(v)
return MockResponseDict(
{
"status": 200,
"data": MockResponseDict({"embeddings": embeddings}),
}
)
monkeypatch.setattr(embeddings.client, "embed_text", mocked_response)
output = embeddings.embed_documents(["Hello", "World"])
correct_output = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
assert output == correct_output
|
import keras.src.backend
from keras.src import tree
from keras.src.layers.layer import Layer
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
from keras.src.utils import jax_utils
from keras.src.utils import tracking
class TFDataLayer(Layer):
"""Layer that can safely used in a tf.data pipeline.
The `call()` method must solely rely on `self.backend` ops.
Only supports a single input tensor argument.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.backend = backend_utils.DynamicBackend()
self._allow_non_tensor_positional_args = True
def __call__(self, inputs, **kwargs):
sample_input = tree.flatten(inputs)[0]
if (
not isinstance(sample_input, keras.KerasTensor)
and backend_utils.in_tf_graph()
and not jax_utils.is_in_jax_tracing_scope(sample_input)
):
# We're in a TF graph, e.g. a tf.data pipeline.
self.backend.set_backend("tensorflow")
inputs = tree.map_structure(
lambda x: self.backend.convert_to_tensor(
x, dtype=self.compute_dtype
),
inputs,
)
switch_convert_input_args = False
if self._convert_input_args:
self._convert_input_args = False
switch_convert_input_args = True
try:
outputs = super().__call__(inputs, **kwargs)
finally:
self.backend.reset()
if switch_convert_input_args:
self._convert_input_args = True
return outputs
return super().__call__(inputs, **kwargs)
@tracking.no_automatic_dependency_tracking
def _get_seed_generator(self, backend=None):
if backend is None or backend == keras.backend.backend():
return self.generator
if not hasattr(self, "_backend_generators"):
self._backend_generators = {}
if backend in self._backend_generators:
return self._backend_generators[backend]
seed_generator = SeedGenerator(self.seed, backend=self.backend)
self._backend_generators[backend] = seed_generator
return seed_generator
def convert_weight(self, weight):
"""Convert the weight if it is from the a different backend."""
if self.backend.name == keras.backend.backend():
return weight
else:
weight = keras.ops.convert_to_numpy(weight)
return self.backend.convert_to_tensor(weight)
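# Hedged sketch (hypothetical subclass for illustration, not part of this
# module): layers built on TFDataLayer implement `call()` purely with
# `self.backend` ops, so the same code runs eagerly and inside a tf.data
# pipeline once the backend has been switched to "tensorflow".
class _ExampleRescale(TFDataLayer):
    def __init__(self, scale, **kwargs):
        super().__init__(**kwargs)
        self.scale = scale
    def call(self, inputs):
        # `self.backend.numpy` resolves against the currently active backend.
        return self.backend.numpy.multiply(inputs, self.scale)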
|
import keras.src.backend
from keras.src import tree
from keras.src.layers.layer import Layer
from keras.src.random.seed_generator import SeedGenerator
from keras.src.utils import backend_utils
from keras.src.utils import tracking
class TFDataLayer(Layer):
"""Layer that can safely used in a tf.data pipeline.
The `call()` method must solely rely on `self.backend` ops.
Only supports a single input tensor argument.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.backend = backend_utils.DynamicBackend()
self._allow_non_tensor_positional_args = True
def __call__(self, inputs, **kwargs):
if backend_utils.in_tf_graph() and not isinstance(
inputs, keras.KerasTensor
):
# We're in a TF graph, e.g. a tf.data pipeline.
self.backend.set_backend("tensorflow")
inputs = tree.map_structure(
lambda x: self.backend.convert_to_tensor(
x, dtype=self.compute_dtype
),
inputs,
)
switch_convert_input_args = False
if self._convert_input_args:
self._convert_input_args = False
switch_convert_input_args = True
try:
outputs = super().__call__(inputs, **kwargs)
finally:
self.backend.reset()
if switch_convert_input_args:
self._convert_input_args = True
return outputs
return super().__call__(inputs, **kwargs)
@tracking.no_automatic_dependency_tracking
def _get_seed_generator(self, backend=None):
if backend is None or backend == keras.backend.backend():
return self.generator
if not hasattr(self, "_backend_generators"):
self._backend_generators = {}
if backend in self._backend_generators:
return self._backend_generators[backend]
seed_generator = SeedGenerator(self.seed, backend=self.backend)
self._backend_generators[backend] = seed_generator
return seed_generator
def convert_weight(self, weight):
"""Convert the weight if it is from the a different backend."""
if self.backend.name == keras.backend.backend():
return weight
else:
weight = keras.ops.convert_to_numpy(weight)
return self.backend.convert_to_tensor(weight)
|
import numpy as np
import torch
from docarray import BaseDocument, DocumentArray, Image, Text
from docarray.typing import (
AnyTensor,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(BaseDocument):
image: Image
text: Text
class MySUperDoc(BaseDocument):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(BaseDocument):
tensor: NdArray
class MyDoc(BaseDocument):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
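# Hedged mini-example (added for illustration, using only the API already
# exercised in the tests above): the protobuf round-trip boils down to
# `Doc.from_protobuf(doc.to_protobuf())`.
def test_protobuf_round_trip_minimal():
    class PointDoc(BaseDocument):
        tensor: NdArray
    original = PointDoc(tensor=np.zeros((8,)))
    restored = PointDoc.from_protobuf(original.to_protobuf())
    assert (restored.tensor == original.tensor).all()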
|
import numpy as np
import torch
from docarray import Document, DocumentArray, Image, Text
from docarray.typing import (
AnyTensor,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
class MySUperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(Document):
tensor: NdArray
class MyDoc(Document):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
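# Hedged usage sketch (typical MMEngine-style hook registration, not part of
# this module): the hook is normally enabled through the runner config, e.g.:
#
#     custom_hooks = [
#         dict(type='EmptyCacheHook',
#              before_epoch=False, after_epoch=True, after_iter=False),
#     ]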
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|